1 // This file is Copyright its original authors, visible in version control
2 // history.
3 //
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
8 // licenses.
9
10 //! Tests that stand up a network of ChannelManagers, create channels, send payments/messages
11 //! between them, and often check that the resulting ChannelMonitors are able to claim outputs
12 //! on-chain.
13
14 use chain;
15 use chain::{Confirm, Listen, Watch};
16 use chain::channelmonitor;
17 use chain::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
18 use chain::transaction::OutPoint;
19 use chain::keysinterface::{KeysInterface, BaseSign};
20 use ln::{PaymentPreimage, PaymentSecret, PaymentHash};
21 use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC};
22 use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA};
23 use ln::channel::{Channel, ChannelError};
24 use ln::{chan_utils, onion_utils};
25 use ln::chan_utils::HTLC_SUCCESS_TX_WEIGHT;
26 use routing::router::{Route, RouteHop, RouteHint, RouteHintHop, get_route, get_keysend_route};
27 use routing::network_graph::RoutingFees;
28 use ln::features::{ChannelFeatures, InitFeatures, InvoiceFeatures, NodeFeatures};
29 use ln::msgs;
30 use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler,HTLCFailChannelUpdate, ErrorAction};
31 use ln::script::ShutdownScript;
32 use util::enforcing_trait_impls::EnforcingSigner;
33 use util::{byte_utils, test_utils};
34 use util::test_utils::OnGetShutdownScriptpubkey;
35 use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
36 use util::errors::APIError;
37 use util::ser::{Writeable, ReadableArgs};
38 use util::config::UserConfig;
39
40 use bitcoin::hashes::sha256d::Hash as Sha256dHash;
41 use bitcoin::hash_types::{Txid, BlockHash};
42 use bitcoin::blockdata::block::{Block, BlockHeader};
43 use bitcoin::blockdata::script::Builder;
44 use bitcoin::blockdata::opcodes;
45 use bitcoin::blockdata::constants::genesis_block;
46 use bitcoin::network::constants::Network;
47
48 use bitcoin::hashes::sha256::Hash as Sha256;
49 use bitcoin::hashes::Hash;
50
51 use bitcoin::secp256k1::{Secp256k1, Message};
52 use bitcoin::secp256k1::key::{PublicKey,SecretKey};
53
54 use regex;
55
56 use io;
57 use prelude::*;
58 use alloc::collections::BTreeSet;
59 use core::default::Default;
60 use core::num::NonZeroU8;
61 use sync::{Arc, Mutex};
62
63 use ln::functional_test_utils::*;
64 use ln::chan_utils::CommitmentTransaction;
65 use ln::msgs::OptionalField::Present;
66
67 #[test]
68 fn test_insane_channel_opens() {
69         // Stand up a network of 2 nodes
70         let chanmon_cfgs = create_chanmon_cfgs(2);
71         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
72         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
73         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
74
75         // Instantiate channel parameters where we push the maximum msats given our
76         // funding satoshis
77         let channel_value_sat = 31337; // same as funding satoshis
78         let channel_reserve_satoshis = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(channel_value_sat);
79         let push_msat = (channel_value_sat - channel_reserve_satoshis) * 1000;
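        // (This push_msat is the largest allowed for this funding amount: anything above (funding - reserve) * 1000 is one of the insane values probed below.)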
80
81         // Have node0 initiate a channel to node1 with aforementioned parameters
82         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_sat, push_msat, 42, None).unwrap();
83
84         // Extract the channel open message from node0 to node1
85         let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
86
87         // Test helper that asserts we get the correct error string given a mutator
88         // that supposedly makes the channel open message insane
89         let insane_open_helper = |expected_error_str: &str, message_mutator: fn(msgs::OpenChannel) -> msgs::OpenChannel| {
90                 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &message_mutator(open_channel_message.clone()));
91                 let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
92                 assert_eq!(msg_events.len(), 1);
93                 let expected_regex = regex::Regex::new(expected_error_str).unwrap();
94                 if let MessageSendEvent::HandleError { ref action, .. } = msg_events[0] {
95                         match action {
96                                 &ErrorAction::SendErrorMessage { .. } => {
97                                         nodes[1].logger.assert_log_regex("lightning::ln::channelmanager".to_string(), expected_regex, 1);
98                                 },
99                                 _ => panic!("unexpected event!"),
100                         }
101                 } else { assert!(false); }
102         };
103
104         use ln::channel::MAX_FUNDING_SATOSHIS;
105         use ln::channelmanager::MAX_LOCAL_BREAKDOWN_TIMEOUT;
106
107         // Test all mutations that would make the channel open message insane
108         insane_open_helper(format!("Funding must be smaller than {}. It was {}", MAX_FUNDING_SATOSHIS, MAX_FUNDING_SATOSHIS).as_str(), |mut msg| { msg.funding_satoshis = MAX_FUNDING_SATOSHIS; msg });
109
110         insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.funding_satoshis + 1; msg });
111
112         insane_open_helper(r"push_msat \d+ was larger than funding value \d+", |mut msg| { msg.push_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg });
113
114         insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.dust_limit_satoshis = msg.funding_satoshis + 1 ; msg });
115
116         insane_open_helper(r"Bogus; channel reserve \(\d+\) is less than dust limit \(\d+\)", |mut msg| { msg.dust_limit_satoshis = msg.channel_reserve_satoshis + 1; msg });
117
118         insane_open_helper(r"Minimum htlc value \(\d+\) was larger than full channel value \(\d+\)", |mut msg| { msg.htlc_minimum_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg });
119
120         insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg });
121
122         insane_open_helper("0 max_accepted_htlcs makes for a useless channel", |mut msg| { msg.max_accepted_htlcs = 0; msg });
123
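        // 483 is the spec ceiling for max_accepted_htlcs (BOLT 2), so a value one past it must be rejected: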
124         insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.max_accepted_htlcs = 484; msg });
125 }
126
127 #[test]
128 fn test_async_inbound_update_fee() {
129         let chanmon_cfgs = create_chanmon_cfgs(2);
130         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
131         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
132         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
133         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
134         let logger = test_utils::TestLogger::new();
135
136         // balancing
137         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
138
139         // A                                        B
140         // update_fee                            ->
141         // send (1) commitment_signed            -.
142         //                                       <- update_add_htlc/commitment_signed
143         // send (2) RAA (awaiting remote revoke) -.
144         // (1) commitment_signed is delivered    ->
145         //                                       .- send (3) RAA (awaiting remote revoke)
146         // (2) RAA is delivered                  ->
147         //                                       .- send (4) commitment_signed
148         //                                       <- (3) RAA is delivered
149         // send (5) commitment_signed            -.
150         //                                       <- (4) commitment_signed is delivered
151         // send (6) RAA                          -.
152         // (5) commitment_signed is delivered    ->
153         //                                       <- RAA
154         // (6) RAA is delivered                  ->
155
156         // First nodes[0] generates an update_fee
157         {
158                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
159                 *feerate_lock += 20;
160         }
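        // The new feerate is only picked up when the funder's timer fires: timer_tick_occurred below is what actually generates the update_fee.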
161         nodes[0].node.timer_tick_occurred();
162         check_added_monitors!(nodes[0], 1);
163
164         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
165         assert_eq!(events_0.len(), 1);
166         let (update_msg, commitment_signed) = match events_0[0] { // (1)
167                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
168                         (update_fee.as_ref(), commitment_signed)
169                 },
170                 _ => panic!("Unexpected event"),
171         };
172
173         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
174
175         // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
176         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[0]);
177         let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
178         nodes[1].node.send_payment(&get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 40000, TEST_FINAL_CLTV, &logger).unwrap(), our_payment_hash, &Some(our_payment_secret)).unwrap();
179         check_added_monitors!(nodes[1], 1);
180
181         let payment_event = {
182                 let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events();
183                 assert_eq!(events_1.len(), 1);
184                 SendEvent::from_event(events_1.remove(0))
185         };
186         assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
187         assert_eq!(payment_event.msgs.len(), 1);
188
189         // ...now when the messages get delivered everyone should be happy
190         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
191         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2)
192         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
193         // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes
194         check_added_monitors!(nodes[0], 1);
195
196         // deliver(1), generate (3):
197         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
198         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
199         // nodes[1] is awaiting nodes[0] revoke_and_ack so get_event_msg's assert(len == 1) passes
200         check_added_monitors!(nodes[1], 1);
201
202         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack); // deliver (2)
203         let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
204         assert!(bs_update.update_add_htlcs.is_empty()); // (4)
205         assert!(bs_update.update_fulfill_htlcs.is_empty()); // (4)
206         assert!(bs_update.update_fail_htlcs.is_empty()); // (4)
207         assert!(bs_update.update_fail_malformed_htlcs.is_empty()); // (4)
208         assert!(bs_update.update_fee.is_none()); // (4)
209         check_added_monitors!(nodes[1], 1);
210
211         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); // deliver (3)
212         let as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
213         assert!(as_update.update_add_htlcs.is_empty()); // (5)
214         assert!(as_update.update_fulfill_htlcs.is_empty()); // (5)
215         assert!(as_update.update_fail_htlcs.is_empty()); // (5)
216         assert!(as_update.update_fail_malformed_htlcs.is_empty()); // (5)
217         assert!(as_update.update_fee.is_none()); // (5)
218         check_added_monitors!(nodes[0], 1);
219
220         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed); // deliver (4)
221         let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
222         // only (6) so get_event_msg's assert(len == 1) passes
223         check_added_monitors!(nodes[0], 1);
224
225         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update.commitment_signed); // deliver (5)
226         let bs_second_revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
227         check_added_monitors!(nodes[1], 1);
228
229         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke);
230         check_added_monitors!(nodes[0], 1);
231
232         let events_2 = nodes[0].node.get_and_clear_pending_events();
233         assert_eq!(events_2.len(), 1);
234         match events_2[0] {
235                 Event::PendingHTLCsForwardable {..} => {}, // If we actually processed we'd receive the payment
236                 _ => panic!("Unexpected event"),
237         }
238
239         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke); // deliver (6)
240         check_added_monitors!(nodes[1], 1);
241 }
242
243 #[test]
244 fn test_update_fee_unordered_raa() {
245         // Just the intro to the previous test followed by an out-of-order RAA (which caused a
246         // crash in an earlier version of the update_fee patch)
247         let chanmon_cfgs = create_chanmon_cfgs(2);
248         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
249         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
250         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
251         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
252         let logger = test_utils::TestLogger::new();
253
254         // balancing
255         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
256
257         // First nodes[0] generates an update_fee
258         {
259                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
260                 *feerate_lock += 20;
261         }
262         nodes[0].node.timer_tick_occurred();
263         check_added_monitors!(nodes[0], 1);
264
265         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
266         assert_eq!(events_0.len(), 1);
267         let update_msg = match events_0[0] { // (1)
268                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
269                         update_fee.as_ref()
270                 },
271                 _ => panic!("Unexpected event"),
272         };
273
274         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
275
276         // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
277         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[0]);
278         let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
279         nodes[1].node.send_payment(&get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 40000, TEST_FINAL_CLTV, &logger).unwrap(), our_payment_hash, &Some(our_payment_secret)).unwrap();
280         check_added_monitors!(nodes[1], 1);
281
282         let payment_event = {
283                 let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events();
284                 assert_eq!(events_1.len(), 1);
285                 SendEvent::from_event(events_1.remove(0))
286         };
287         assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
288         assert_eq!(payment_event.msgs.len(), 1);
289
290         // ...now when the messages get delivered everyone should be happy
291         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
292         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2)
293         let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
294         // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes
295         check_added_monitors!(nodes[0], 1);
296
297         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg); // deliver (2)
298         check_added_monitors!(nodes[1], 1);
299
300         // We can't continue, sadly, because our (1) now has a bogus signature
301 }
302
303 #[test]
304 fn test_multi_flight_update_fee() {
305         let chanmon_cfgs = create_chanmon_cfgs(2);
306         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
307         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
308         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
309         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
310
311         // A                                        B
312         // update_fee/commitment_signed          ->
313         //                                       .- send (1) RAA and (2) commitment_signed
314         // update_fee (never committed)          ->
315         // (3) update_fee                        ->
316         // We have to manually generate the above update_fee; it is allowed by the protocol, but we
317         // don't track which updates correspond to which revoke_and_ack responses, so we're in
318         // AwaitingRAA mode and will not generate the update_fee yet.
319         //                                       <- (1) RAA delivered
320         // (3) is generated and send (4) CS      -.
321         // Note that A cannot generate (4) prior to (1) being delivered as it otherwise doesn't
322         // know the per_commitment_point to use for it.
323         //                                       <- (2) commitment_signed delivered
324         // revoke_and_ack                        ->
325         //                                          B should send no response here
326         // (4) commitment_signed delivered       ->
327         //                                       <- RAA/commitment_signed delivered
328         // revoke_and_ack                        ->
329
330         // First nodes[0] generates an update_fee
331         let initial_feerate;
332         {
333                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
334                 initial_feerate = *feerate_lock;
335                 *feerate_lock = initial_feerate + 20;
336         }
337         nodes[0].node.timer_tick_occurred();
338         check_added_monitors!(nodes[0], 1);
339
340         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
341         assert_eq!(events_0.len(), 1);
342         let (update_msg_1, commitment_signed_1) = match events_0[0] { // (1)
343                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
344                         (update_fee.as_ref().unwrap(), commitment_signed)
345                 },
346                 _ => panic!("Unexpected event"),
347         };
348
349         // Deliver first update_fee/commitment_signed pair, generating (1) and (2):
350         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg_1);
351         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed_1);
352         let (bs_revoke_msg, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
353         check_added_monitors!(nodes[1], 1);
354
355         // nodes[0] is awaiting a revoke from nodes[1] before it will create a new commitment
356         // transaction:
357         {
358                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
359                 *feerate_lock = initial_feerate + 40;
360         }
361         nodes[0].node.timer_tick_occurred();
362         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
363         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
364
365         // Create the (3) update_fee message that nodes[0] will generate, before it actually does so...
366         let mut update_msg_2 = msgs::UpdateFee {
367                 channel_id: update_msg_1.channel_id.clone(),
368                 feerate_per_kw: (initial_feerate + 30) as u32,
369         };
370
371         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2);
372
373         update_msg_2.feerate_per_kw = (initial_feerate + 40) as u32;
374         // Deliver (3)
375         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2);
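        // An update_fee that was never committed is simply superseded by a later one, so only the +40 feerate should appear in nodes[0]'s next commitment update (checked below).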
376
377         // Deliver (1), generating (3) and (4)
378         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_msg);
379         let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
380         check_added_monitors!(nodes[0], 1);
381         assert!(as_second_update.update_add_htlcs.is_empty());
382         assert!(as_second_update.update_fulfill_htlcs.is_empty());
383         assert!(as_second_update.update_fail_htlcs.is_empty());
384         assert!(as_second_update.update_fail_malformed_htlcs.is_empty());
385         // Check that the update_fee newly generated matches what we delivered:
386         assert_eq!(as_second_update.update_fee.as_ref().unwrap().channel_id, update_msg_2.channel_id);
387         assert_eq!(as_second_update.update_fee.as_ref().unwrap().feerate_per_kw, update_msg_2.feerate_per_kw);
388
389         // Deliver (2) commitment_signed
390         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
391         let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
392         check_added_monitors!(nodes[0], 1);
393         // No commitment_signed so get_event_msg's assert(len == 1) passes
394
395         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg);
396         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
397         check_added_monitors!(nodes[1], 1);
398
399         // Deliver (4)
400         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed);
401         let (bs_second_revoke, bs_second_commitment) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
402         check_added_monitors!(nodes[1], 1);
403
404         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke);
405         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
406         check_added_monitors!(nodes[0], 1);
407
408         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment);
409         let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
410         // No commitment_signed so get_event_msg's assert(len == 1) passes
411         check_added_monitors!(nodes[0], 1);
412
413         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke);
414         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
415         check_added_monitors!(nodes[1], 1);
416 }
417
418 fn do_test_1_conf_open(connect_style: ConnectStyle) {
419         // Previously, if the minimum_depth config was set to 1, we'd never send a funding_locked. This
420         // tests that we properly send one in that case.
421         let mut alice_config = UserConfig::default();
422         alice_config.own_channel_config.minimum_depth = 1;
423         alice_config.channel_options.announced_channel = true;
424         alice_config.peer_channel_config_limits.force_announced_channel_preference = false;
425         let mut bob_config = UserConfig::default();
426         bob_config.own_channel_config.minimum_depth = 1;
427         bob_config.channel_options.announced_channel = true;
428         bob_config.peer_channel_config_limits.force_announced_channel_preference = false;
429         let chanmon_cfgs = create_chanmon_cfgs(2);
430         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
431         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(alice_config), Some(bob_config)]);
432         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
433         *nodes[0].connect_style.borrow_mut() = connect_style;
434
435         let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, InitFeatures::known(), InitFeatures::known());
436         mine_transaction(&nodes[1], &tx);
437         nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id()));
438
439         mine_transaction(&nodes[0], &tx);
440         let (funding_locked, _) = create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
441         let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked);
442
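        // The announcement built for this 1-conf channel should be accepted into both nodes' network graphs: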
443         for node in nodes {
444                 assert!(node.net_graph_msg_handler.handle_channel_announcement(&announcement).unwrap());
445                 node.net_graph_msg_handler.handle_channel_update(&as_update).unwrap();
446                 node.net_graph_msg_handler.handle_channel_update(&bs_update).unwrap();
447         }
448 }
449 #[test]
450 fn test_1_conf_open() {
451         do_test_1_conf_open(ConnectStyle::BestBlockFirst);
452         do_test_1_conf_open(ConnectStyle::TransactionsFirst);
453         do_test_1_conf_open(ConnectStyle::FullBlockViaListen);
454 }
455
456 fn do_test_sanity_on_in_flight_opens(steps: u8) {
457         // Previously, we had issues deserializing channels when we hadn't connected the first block
458         // after creation. To catch that and similar issues, we lean on the Node::drop impl to test
459         // serialization round-trips and simply do steps towards opening a channel and then drop the
460         // Node objects.
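        // The low nibble of `steps` selects how far through the open handshake to get before dropping the nodes; the high bit (0b1000_0000) additionally connects an extra block first.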
461
462         let chanmon_cfgs = create_chanmon_cfgs(2);
463         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
464         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
465         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
466
467         if steps & 0b1000_0000 != 0 {
468                 let block = Block {
469                         header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
470                         txdata: vec![],
471                 };
472                 connect_block(&nodes[0], &block);
473                 connect_block(&nodes[1], &block);
474         }
475
476         if steps & 0x0f == 0 { return; }
477         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
478         let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
479
480         if steps & 0x0f == 1 { return; }
481         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel);
482         let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
483
484         if steps & 0x0f == 2 { return; }
485         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
486
487         let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], 100000, 42);
488
489         if steps & 0x0f == 3 { return; }
490         nodes[0].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
491         check_added_monitors!(nodes[0], 0);
492         let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
493
494         if steps & 0x0f == 4 { return; }
495         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
496         {
497                 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
498                 assert_eq!(added_monitors.len(), 1);
499                 assert_eq!(added_monitors[0].0, funding_output);
500                 added_monitors.clear();
501         }
502         let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
503
504         if steps & 0x0f == 5 { return; }
505         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
506         {
507                 let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
508                 assert_eq!(added_monitors.len(), 1);
509                 assert_eq!(added_monitors[0].0, funding_output);
510                 added_monitors.clear();
511         }
512
513         let events_4 = nodes[0].node.get_and_clear_pending_events();
514         assert_eq!(events_4.len(), 0);
515
516         if steps & 0x0f == 6 { return; }
517         create_chan_between_nodes_with_value_confirm_first(&nodes[0], &nodes[1], &tx, 2);
518
519         if steps & 0x0f == 7 { return; }
520         confirm_transaction_at(&nodes[0], &tx, 2);
521         connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
522         create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
523 }
524
525 #[test]
526 fn test_sanity_on_in_flight_opens() {
527         do_test_sanity_on_in_flight_opens(0);
528         do_test_sanity_on_in_flight_opens(0 | 0b1000_0000);
529         do_test_sanity_on_in_flight_opens(1);
530         do_test_sanity_on_in_flight_opens(1 | 0b1000_0000);
531         do_test_sanity_on_in_flight_opens(2);
532         do_test_sanity_on_in_flight_opens(2 | 0b1000_0000);
533         do_test_sanity_on_in_flight_opens(3);
534         do_test_sanity_on_in_flight_opens(3 | 0b1000_0000);
535         do_test_sanity_on_in_flight_opens(4);
536         do_test_sanity_on_in_flight_opens(4 | 0b1000_0000);
537         do_test_sanity_on_in_flight_opens(5);
538         do_test_sanity_on_in_flight_opens(5 | 0b1000_0000);
539         do_test_sanity_on_in_flight_opens(6);
540         do_test_sanity_on_in_flight_opens(6 | 0b1000_0000);
541         do_test_sanity_on_in_flight_opens(7);
542         do_test_sanity_on_in_flight_opens(7 | 0b1000_0000);
543         do_test_sanity_on_in_flight_opens(8);
544         do_test_sanity_on_in_flight_opens(8 | 0b1000_0000);
545 }
546
547 #[test]
548 fn test_update_fee_vanilla() {
549         let chanmon_cfgs = create_chanmon_cfgs(2);
550         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
551         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
552         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
553         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
554
555         {
556                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
557                 *feerate_lock += 25;
558         }
559         nodes[0].node.timer_tick_occurred();
560         check_added_monitors!(nodes[0], 1);
561
562         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
563         assert_eq!(events_0.len(), 1);
564         let (update_msg, commitment_signed) = match events_0[0] {
565                         MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
566                         (update_fee.as_ref(), commitment_signed)
567                 },
568                 _ => panic!("Unexpected event"),
569         };
570         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
571
572         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
573         let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
574         check_added_monitors!(nodes[1], 1);
575
576         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
577         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
578         check_added_monitors!(nodes[0], 1);
579
580         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
581         let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
582         // No commitment_signed so get_event_msg's assert(len == 1) passes
583         check_added_monitors!(nodes[0], 1);
584
585         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
586         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
587         check_added_monitors!(nodes[1], 1);
588 }
589
590 #[test]
591 fn test_update_fee_that_funder_cannot_afford() {
592         let chanmon_cfgs = create_chanmon_cfgs(2);
593         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
594         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
595         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
596         let channel_value = 1888;
597         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 700000, InitFeatures::known(), InitFeatures::known());
598         let channel_id = chan.2;
599
600         let feerate = 260;
601         {
602                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
603                 *feerate_lock = feerate;
604         }
605         nodes[0].node.timer_tick_occurred();
606         check_added_monitors!(nodes[0], 1);
607         let update_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
608
609         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg.update_fee.unwrap());
610
611         commitment_signed_dance!(nodes[1], nodes[0], update_msg.commitment_signed, false);
612
613         //Confirm that the new fee based on the last local commitment txn is what we expected based on the feerate of 260 set above.
614         //This value results in a fee that is exactly what the funder can afford (277 sat + 1000 sat channel reserve)
615         {
616                 let commitment_tx = get_local_commitment_txn!(nodes[1], channel_id)[0].clone();
617
618                 //We made sure neither party's funds are below the dust limit, so subtract the 2 non-HTLC outputs from the output count to get the HTLC count
619                 let num_htlcs = commitment_tx.output.len() - 2;
620                 let total_fee: u64 = feerate as u64 * (COMMITMENT_TX_BASE_WEIGHT + (num_htlcs as u64) * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000;
621                 let mut actual_fee = commitment_tx.output.iter().fold(0, |acc, output| acc + output.value);
622                 actual_fee = channel_value - actual_fee;
623                 assert_eq!(total_fee, actual_fee);
624         }
625
626         //Add 2 to the previous fee rate so that the final fee increases by 1 (with no HTLCs the fee is essentially
627         //fee_rate*(724/1000), so a single-unit increment of 0.724 would be rounded back down)
628         {
629                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
630                 *feerate_lock = feerate + 2;
631         }
632         nodes[0].node.timer_tick_occurred();
633         check_added_monitors!(nodes[0], 1);
634
635         let update2_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
636
637         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update2_msg.update_fee.unwrap());
638
639         //While producing the commitment_signed response after handling a received update_fee request, the
640         //check of whether the funder, who sent the update_fee request, can afford the new fee (funder_balance >= fee+channel_reserve)
641         //should produce an error.
642         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &update2_msg.commitment_signed);
643         nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Funding remote cannot afford proposed new fee".to_string(), 1);
644         check_added_monitors!(nodes[1], 1);
645         check_closed_broadcast!(nodes[1], true);
646 }
647
648 #[test]
649 fn test_update_fee_with_fundee_update_add_htlc() {
650         let chanmon_cfgs = create_chanmon_cfgs(2);
651         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
652         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
653         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
654         let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
655         let logger = test_utils::TestLogger::new();
656
657         // balancing
658         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
659
660         {
661                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
662                 *feerate_lock += 20;
663         }
664         nodes[0].node.timer_tick_occurred();
665         check_added_monitors!(nodes[0], 1);
666
667         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
668         assert_eq!(events_0.len(), 1);
669         let (update_msg, commitment_signed) = match events_0[0] {
670                         MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
671                         (update_fee.as_ref(), commitment_signed)
672                 },
673                 _ => panic!("Unexpected event"),
674         };
675         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
676         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
677         let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
678         check_added_monitors!(nodes[1], 1);
679
680         let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[0]);
681         let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
682         let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 800000, TEST_FINAL_CLTV, &logger).unwrap();
683
684         // nothing happens since node[1] is in AwaitingRemoteRevoke
685         nodes[1].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
686         {
687                 let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
688                 assert_eq!(added_monitors.len(), 0);
689                 added_monitors.clear();
690         }
691         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
692         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
693         // node[1] has nothing to do
694
695         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
696         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
697         check_added_monitors!(nodes[0], 1);
698
699         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
700         let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
701         // No commitment_signed so get_event_msg's assert(len == 1) passes
702         check_added_monitors!(nodes[0], 1);
703         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
704         check_added_monitors!(nodes[1], 1);
705         // AwaitingRemoteRevoke ends here
706
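        // With the fee update fully locked in, nodes[1] now releases the HTLC it queued while it was AwaitingRemoteRevoke: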
707         let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
708         assert_eq!(commitment_update.update_add_htlcs.len(), 1);
709         assert_eq!(commitment_update.update_fulfill_htlcs.len(), 0);
710         assert_eq!(commitment_update.update_fail_htlcs.len(), 0);
711         assert_eq!(commitment_update.update_fail_malformed_htlcs.len(), 0);
712         assert_eq!(commitment_update.update_fee.is_none(), true);
713
714         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &commitment_update.update_add_htlcs[0]);
715         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed);
716         check_added_monitors!(nodes[0], 1);
717         let (revoke, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
718
719         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke);
720         check_added_monitors!(nodes[1], 1);
721         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
722
723         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed);
724         check_added_monitors!(nodes[1], 1);
725         let revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
726         // No commitment_signed so get_event_msg's assert(len == 1) passes
727
728         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke);
729         check_added_monitors!(nodes[0], 1);
730         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
731
732         expect_pending_htlcs_forwardable!(nodes[0]);
733
734         let events = nodes[0].node.get_and_clear_pending_events();
735         assert_eq!(events.len(), 1);
736         match events[0] {
737                 Event::PaymentReceived { .. } => { },
738                 _ => panic!("Unexpected event"),
739         };
740
741         claim_payment(&nodes[1], &vec!(&nodes[0])[..], our_payment_preimage);
742
743         send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
744         send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
745         close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
746 }
747
748 #[test]
749 fn test_update_fee() {
750         let chanmon_cfgs = create_chanmon_cfgs(2);
751         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
752         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
753         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
754         let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
755         let channel_id = chan.2;
756
757         // A                                        B
758         // (1) update_fee/commitment_signed      ->
759         //                                       <- (2) revoke_and_ack
760         //                                       .- send (3) commitment_signed
761         // (4) update_fee/commitment_signed      ->
762         //                                       .- send (5) revoke_and_ack (no CS as we're awaiting a revoke)
763         //                                       <- (3) commitment_signed delivered
764         // send (6) revoke_and_ack               -.
765         //                                       <- (5) deliver revoke_and_ack
766         // (6) deliver revoke_and_ack            ->
767         //                                       .- send (7) commitment_signed in response to (4)
768         //                                       <- (7) deliver commitment_signed
769         // revoke_and_ack                        ->
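        // As the diagram shows, only A updates the fee here, and it issues a second update_fee/commitment_signed before the dance for the first has fully completed; no HTLCs are involved.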
770
771         // Create and deliver (1)...
772         let feerate;
773         {
774                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
775                 feerate = *feerate_lock;
776                 *feerate_lock = feerate + 20;
777         }
778         nodes[0].node.timer_tick_occurred();
779         check_added_monitors!(nodes[0], 1);
780
781         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
782         assert_eq!(events_0.len(), 1);
783         let (update_msg, commitment_signed) = match events_0[0] {
784                         MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
785                         (update_fee.as_ref(), commitment_signed)
786                 },
787                 _ => panic!("Unexpected event"),
788         };
789         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
790
791         // Generate (2) and (3):
792         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
793         let (revoke_msg, commitment_signed_0) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
794         check_added_monitors!(nodes[1], 1);
795
796         // Deliver (2):
797         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
798         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
799         check_added_monitors!(nodes[0], 1);
800
801         // Create and deliver (4)...
802         {
803                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
804                 *feerate_lock = feerate + 30;
805         }
806         nodes[0].node.timer_tick_occurred();
807         check_added_monitors!(nodes[0], 1);
808         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
809         assert_eq!(events_0.len(), 1);
810         let (update_msg, commitment_signed) = match events_0[0] {
811                         MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
812                         (update_fee.as_ref(), commitment_signed)
813                 },
814                 _ => panic!("Unexpected event"),
815         };
816
817         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
818         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
819         check_added_monitors!(nodes[1], 1);
820         // ... creating (5)
821         let revoke_msg = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
822         // No commitment_signed so get_event_msg's assert(len == 1) passes
823
824         // Handle (3), creating (6):
825         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_0);
826         check_added_monitors!(nodes[0], 1);
827         let revoke_msg_0 = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
828         // No commitment_signed so get_event_msg's assert(len == 1) passes
829
830         // Deliver (5):
831         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
832         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
833         check_added_monitors!(nodes[0], 1);
834
835         // Deliver (6), creating (7):
836         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg_0);
837         let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
838         assert!(commitment_update.update_add_htlcs.is_empty());
839         assert!(commitment_update.update_fulfill_htlcs.is_empty());
840         assert!(commitment_update.update_fail_htlcs.is_empty());
841         assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
842         assert!(commitment_update.update_fee.is_none());
843         check_added_monitors!(nodes[1], 1);
844
845         // Deliver (7)
846         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed);
847         check_added_monitors!(nodes[0], 1);
848         let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
849         // No commitment_signed so get_event_msg's assert(len == 1) passes
850
851         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
852         check_added_monitors!(nodes[1], 1);
853         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
854
855         assert_eq!(get_feerate!(nodes[0], channel_id), feerate + 30);
856         assert_eq!(get_feerate!(nodes[1], channel_id), feerate + 30);
857         close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
858 }
859
860 #[test]
861 fn pre_funding_lock_shutdown_test() {
862         // Test sending a shutdown prior to funding_locked after funding generation
863         let chanmon_cfgs = create_chanmon_cfgs(2);
864         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
865         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
866         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
867         let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 8000000, 0, InitFeatures::known(), InitFeatures::known());
868         mine_transaction(&nodes[0], &tx);
869         mine_transaction(&nodes[1], &tx);
870
871         nodes[0].node.close_channel(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).unwrap();
872         let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
873         nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
874         let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
875         nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
876
877         let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
878         nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
879         let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
880         nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap());
881         let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
882         assert!(node_0_none.is_none());
883
884         assert!(nodes[0].node.list_channels().is_empty());
885         assert!(nodes[1].node.list_channels().is_empty());
886 }
887
888 #[test]
889 fn updates_shutdown_wait() {
890         // Test sending a shutdown with outstanding updates pending
891         let chanmon_cfgs = create_chanmon_cfgs(3);
892         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
893         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
894         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
895         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
896         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
897         let logger = test_utils::TestLogger::new();
898
899         let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000);
900
901         nodes[0].node.close_channel(&chan_1.2).unwrap();
902         let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
903         nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
904         let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
905         nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
906
907         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
908         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
909
910         let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[0]);
911
912         let net_graph_msg_handler0 = &nodes[0].net_graph_msg_handler;
913         let net_graph_msg_handler1 = &nodes[1].net_graph_msg_handler;
914         let route_1 = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler0.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
915         let route_2 = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler1.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
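        // With shutdown already negotiated on chan_1, attempts to send new HTLCs over it in either direction should fail with ChannelUnavailable: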
916         unwrap_send_err!(nodes[0].node.send_payment(&route_1, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable {..}, {});
917         unwrap_send_err!(nodes[1].node.send_payment(&route_2, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable {..}, {});
918
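        // The HTLC that was routed before shutdown began must still complete: nodes[2] claims it and the fulfill flows back across the closing channel.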
919         assert!(nodes[2].node.claim_funds(our_payment_preimage));
920         check_added_monitors!(nodes[2], 1);
921         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
922         assert!(updates.update_add_htlcs.is_empty());
923         assert!(updates.update_fail_htlcs.is_empty());
924         assert!(updates.update_fail_malformed_htlcs.is_empty());
925         assert!(updates.update_fee.is_none());
926         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
927         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
928         expect_payment_forwarded!(nodes[1], Some(1000), false);
929         check_added_monitors!(nodes[1], 1);
930         let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
931         commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
932
933         assert!(updates_2.update_add_htlcs.is_empty());
934         assert!(updates_2.update_fail_htlcs.is_empty());
935         assert!(updates_2.update_fail_malformed_htlcs.is_empty());
936         assert!(updates_2.update_fee.is_none());
937         assert_eq!(updates_2.update_fulfill_htlcs.len(), 1);
938         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]);
939         commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
940
941         let events = nodes[0].node.get_and_clear_pending_events();
942         assert_eq!(events.len(), 1);
943         match events[0] {
944                 Event::PaymentSent { ref payment_preimage } => {
945                         assert_eq!(our_payment_preimage, *payment_preimage);
946                 },
947                 _ => panic!("Unexpected event"),
948         }
949
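        // With the HTLC fully resolved, chan_1's cooperative close can complete via the closing_signed exchange.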
950         let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
951         nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
952         let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
953         nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap());
954         let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
955         assert!(node_0_none.is_none());
956
957         assert!(nodes[0].node.list_channels().is_empty());
958
959         assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
960         nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
961         close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
962         assert!(nodes[1].node.list_channels().is_empty());
963         assert!(nodes[2].node.list_channels().is_empty());
964 }
965
966 #[test]
967 fn htlc_fail_async_shutdown() {
968         // Test that HTLCs fail if shutdown starts, even if messages are delivered out-of-order
969         let chanmon_cfgs = create_chanmon_cfgs(3);
970         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
971         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
972         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
973         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
974         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
975         let logger = test_utils::TestLogger::new();
976
977         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]);
978         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
979         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
980         nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
981         check_added_monitors!(nodes[0], 1);
982         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
983         assert_eq!(updates.update_add_htlcs.len(), 1);
984         assert!(updates.update_fulfill_htlcs.is_empty());
985         assert!(updates.update_fail_htlcs.is_empty());
986         assert!(updates.update_fail_malformed_htlcs.is_empty());
987         assert!(updates.update_fee.is_none());
988
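        // nodes[1] initiates shutdown before it has received nodes[0]'s update_add_htlc and commitment_signed.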
989         nodes[1].node.close_channel(&chan_1.2).unwrap();
990         let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
991         nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
992         let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
993
994         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
995         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed);
996         check_added_monitors!(nodes[1], 1);
997         nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
998         commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false);
999
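        // Since the channel is shutting down, nodes[1] fails the newly-received HTLC back to nodes[0].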
1000         let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1001         assert!(updates_2.update_add_htlcs.is_empty());
1002         assert!(updates_2.update_fulfill_htlcs.is_empty());
1003         assert_eq!(updates_2.update_fail_htlcs.len(), 1);
1004         assert!(updates_2.update_fail_malformed_htlcs.is_empty());
1005         assert!(updates_2.update_fee.is_none());
1006
1007         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fail_htlcs[0]);
1008         commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
1009
1010         expect_payment_failed!(nodes[0], our_payment_hash, false);
1011
1012         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
1013         assert_eq!(msg_events.len(), 2);
1014         match msg_events[0] {
1015                 MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
1016                         assert_eq!(msg.contents.short_channel_id, chan_1.0.contents.short_channel_id);
1017                 },
1018                 _ => panic!("Unexpected event"),
1019         }
1020         let node_0_closing_signed = match msg_events[1] {
1021                 MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
1022                         assert_eq!(*node_id, nodes[1].node.get_our_node_id());
1023                         (*msg).clone()
1024                 },
1025                 _ => panic!("Unexpected event"),
1026         };
1027
1028         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1029         nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
1030         let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
1031         nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap());
1032         let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
1033         assert!(node_0_none.is_none());
1034
1035         assert!(nodes[0].node.list_channels().is_empty());
1036
1037         assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
1038         nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
1039         close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
1040         assert!(nodes[1].node.list_channels().is_empty());
1041         assert!(nodes[2].node.list_channels().is_empty());
1042 }
1043
1044 fn do_test_shutdown_rebroadcast(recv_count: u8) {
1045         // Test that shutdown/closing_signed is re-sent on reconnect with a variable number of
1046         // messages delivered prior to disconnect
1047         let chanmon_cfgs = create_chanmon_cfgs(3);
1048         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1049         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1050         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1051         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
1052         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
1053
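        // Begin a cooperative close of chan_1, delivering zero, one, or two of the initial shutdown messages depending on recv_count.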
1054         let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000);
1055
1056         nodes[1].node.close_channel(&chan_1.2).unwrap();
1057         let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
1058         if recv_count > 0 {
1059                 nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
1060                 let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
1061                 if recv_count > 1 {
1062                         nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
1063                 }
1064         }
1065
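        // Disconnect and reconnect the peers; shutdown should be re-sent as part of the channel_reestablish flow.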
1066         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
1067         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
1068
1069         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
1070         let node_0_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
1071         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
1072         let node_1_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
1073
1074         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_reestablish);
1075         let node_1_2nd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
1076         assert!(node_1_shutdown == node_1_2nd_shutdown);
1077
1078         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_reestablish);
1079         let node_0_2nd_shutdown = if recv_count > 0 {
1080                 let node_0_2nd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
1081                 nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_2nd_shutdown);
1082                 node_0_2nd_shutdown
1083         } else {
1084                 let node_0_chan_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
1085                 assert_eq!(node_0_chan_update.contents.flags & 2, 0); // "disabled" flag must not be set as we just reconnected.
1086                 nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_2nd_shutdown);
1087                 get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id())
1088         };
1089         nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_2nd_shutdown);
1090
1091         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1092         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1093
1094         assert!(nodes[2].node.claim_funds(our_payment_preimage));
1095         check_added_monitors!(nodes[2], 1);
1096         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1097         assert!(updates.update_add_htlcs.is_empty());
1098         assert!(updates.update_fail_htlcs.is_empty());
1099         assert!(updates.update_fail_malformed_htlcs.is_empty());
1100         assert!(updates.update_fee.is_none());
1101         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
1102         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
1103         expect_payment_forwarded!(nodes[1], Some(1000), false);
1104         check_added_monitors!(nodes[1], 1);
1105         let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1106         commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
1107
1108         assert!(updates_2.update_add_htlcs.is_empty());
1109         assert!(updates_2.update_fail_htlcs.is_empty());
1110         assert!(updates_2.update_fail_malformed_htlcs.is_empty());
1111         assert!(updates_2.update_fee.is_none());
1112         assert_eq!(updates_2.update_fulfill_htlcs.len(), 1);
1113         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]);
1114         commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
1115
1116         let events = nodes[0].node.get_and_clear_pending_events();
1117         assert_eq!(events.len(), 1);
1118         match events[0] {
1119                 Event::PaymentSent { ref payment_preimage } => {
1120                         assert_eq!(our_payment_preimage, *payment_preimage);
1121                 },
1122                 _ => panic!("Unexpected event"),
1123         }
1124
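        // nodes[0] is now ready to send closing_signed; deliver it to nodes[1] only when recv_count > 0.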
1125         let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
1126         if recv_count > 0 {
1127                 nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
1128                 let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
1129                 assert!(node_1_closing_signed.is_some());
1130         }
1131
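        // Disconnect again in the middle of the closing_signed negotiation to test how it resumes.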
1132         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
1133         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
1134
1135         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
1136         let node_0_2nd_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
1137         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
1138         if recv_count == 0 {
1139                 // If the closing_signed wasn't delivered before the disconnect, we can just resume where we left off...
1140                 let node_1_2nd_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
1141
1142                 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_2nd_reestablish);
1143                 let node_0_msgs = nodes[0].node.get_and_clear_pending_msg_events();
1144                 assert_eq!(node_0_msgs.len(), 2);
1145                 let node_0_2nd_closing_signed = match node_0_msgs[1] {
1146                         MessageSendEvent::SendClosingSigned { ref msg, .. } => {
1147                                 assert_eq!(node_0_closing_signed, *msg);
1148                                 msg.clone()
1149                         },
1150                         _ => panic!(),
1151                 };
1152
1153                 let node_0_3rd_shutdown = match node_0_msgs[0] {
1154                         MessageSendEvent::SendShutdown { ref msg, .. } => {
1155                                 assert_eq!(node_0_2nd_shutdown, *msg);
1156                                 msg.clone()
1157                         },
1158                         _ => panic!(),
1159                 };
1160                 assert!(node_0_2nd_shutdown == node_0_3rd_shutdown);
1161
1162                 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish);
1163                 let node_1_3rd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
1164                 assert!(node_1_3rd_shutdown == node_1_2nd_shutdown);
1165
1166                 nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_3rd_shutdown);
1167                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1168
1169                 nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_3rd_shutdown);
1170
1171                 nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed);
1172                 let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
1173                 nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap());
1174                 let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
1175                 assert!(node_0_none.is_none());
1176         } else {
1177                 // If one node, however, received and responded with an identical closing_signed, we end
1178                 // up erroring and nodes[0] will try to broadcast its own latest commitment transaction.
1179                 // There isn't really anything simple we can do better, but in the future we might
1180                 // explore storing a set of recently-closed channels that got disconnected during
1181                 // closing_signed and avoiding broadcasting local commitment txn for some timeout to
1182                 // give our counterparty enough time to (potentially) broadcast a cooperative closing
1183                 // transaction.
1184                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1185
1186                 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish);
1187                 let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
1188                 assert_eq!(msg_events.len(), 1);
1189                 if let MessageSendEvent::HandleError { ref action, .. } = msg_events[0] {
1190                         match action {
1191                                 &ErrorAction::SendErrorMessage { ref msg } => {
1192                                         nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msg);
1193                                         assert_eq!(msg.channel_id, chan_1.2);
1194                                 },
1195                                 _ => panic!("Unexpected event!"),
1196                         }
1197                 } else { panic!("Needed SendErrorMessage close"); }
1198
1199                 // get_closing_signed_broadcast usually eats the BroadcastChannelUpdate for us and
1200                 // checks it, but in this case nodes[0] didn't ever get a chance to receive a
1201                 // closing_signed so we do it ourselves
1202                 check_closed_broadcast!(nodes[0], false);
1203                 check_added_monitors!(nodes[0], 1);
1204         }
1205
1206         assert!(nodes[0].node.list_channels().is_empty());
1207
1208         assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
1209         nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
1210         close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
1211         assert!(nodes[1].node.list_channels().is_empty());
1212         assert!(nodes[2].node.list_channels().is_empty());
1213 }
1214
1215 #[test]
1216 fn test_shutdown_rebroadcast() {
1217         do_test_shutdown_rebroadcast(0);
1218         do_test_shutdown_rebroadcast(1);
1219         do_test_shutdown_rebroadcast(2);
1220 }
1221
1222 #[test]
1223 fn fake_network_test() {
1224         // Simple test which builds a network of ChannelManagers, connects them to each other, and
1225         // tests that payments get routed and transactions broadcast in semi-reasonable ways.
1226         let chanmon_cfgs = create_chanmon_cfgs(4);
1227         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
1228         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
1229         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
1230
1231         // Create some initial channels
1232         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
1233         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
1234         let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known());
1235
1236         // Rebalance the network a bit by relaying a few payments through all the channels...
1237         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
1238         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
1239         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
1240         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
1241
1242         // Send some more payments
1243         send_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 1000000);
1244         send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1], &nodes[0])[..], 1000000);
1245         send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1])[..], 1000000);
1246
1247         // Test failure packets
1248         let payment_hash_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 1000000).1;
1249         fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], payment_hash_1);
1250
1251         // Add a new channel between nodes[1] and nodes[3], skipping nodes[2]
1252         let chan_4 = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known());
1253
1254         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 1000000);
1255         send_payment(&nodes[2], &vec!(&nodes[3])[..], 1000000);
1256         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1257         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1258         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1259         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1260         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1261
1262         // Do some rebalance loop payments, simultaneously
1263         let mut hops = Vec::with_capacity(3);
1264         hops.push(RouteHop {
1265                 pubkey: nodes[2].node.get_our_node_id(),
1266                 node_features: NodeFeatures::empty(),
1267                 short_channel_id: chan_2.0.contents.short_channel_id,
1268                 channel_features: ChannelFeatures::empty(),
1269                 fee_msat: 0,
1270                 cltv_expiry_delta: chan_3.0.contents.cltv_expiry_delta as u32
1271         });
1272         hops.push(RouteHop {
1273                 pubkey: nodes[3].node.get_our_node_id(),
1274                 node_features: NodeFeatures::empty(),
1275                 short_channel_id: chan_3.0.contents.short_channel_id,
1276                 channel_features: ChannelFeatures::empty(),
1277                 fee_msat: 0,
1278                 cltv_expiry_delta: chan_4.1.contents.cltv_expiry_delta as u32
1279         });
1280         hops.push(RouteHop {
1281                 pubkey: nodes[1].node.get_our_node_id(),
1282                 node_features: NodeFeatures::known(),
1283                 short_channel_id: chan_4.0.contents.short_channel_id,
1284                 channel_features: ChannelFeatures::known(),
1285                 fee_msat: 1000000,
1286                 cltv_expiry_delta: TEST_FINAL_CLTV,
1287         });
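        // Work out each intermediate hop's fee from the following channel's advertised fees:
        // fee_msat = fee_base_msat + forwarded_amount_msat * fee_proportional_millionths / 1_000_000.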
1288         hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
1289         hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
1290         let payment_preimage_1 = send_along_route(&nodes[1], Route { paths: vec![hops] }, &vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0;
1291
1292         let mut hops = Vec::with_capacity(3);
1293         hops.push(RouteHop {
1294                 pubkey: nodes[3].node.get_our_node_id(),
1295                 node_features: NodeFeatures::empty(),
1296                 short_channel_id: chan_4.0.contents.short_channel_id,
1297                 channel_features: ChannelFeatures::empty(),
1298                 fee_msat: 0,
1299                 cltv_expiry_delta: chan_3.1.contents.cltv_expiry_delta as u32
1300         });
1301         hops.push(RouteHop {
1302                 pubkey: nodes[2].node.get_our_node_id(),
1303                 node_features: NodeFeatures::empty(),
1304                 short_channel_id: chan_3.0.contents.short_channel_id,
1305                 channel_features: ChannelFeatures::empty(),
1306                 fee_msat: 0,
1307                 cltv_expiry_delta: chan_2.1.contents.cltv_expiry_delta as u32
1308         });
1309         hops.push(RouteHop {
1310                 pubkey: nodes[1].node.get_our_node_id(),
1311                 node_features: NodeFeatures::known(),
1312                 short_channel_id: chan_2.0.contents.short_channel_id,
1313                 channel_features: ChannelFeatures::known(),
1314                 fee_msat: 1000000,
1315                 cltv_expiry_delta: TEST_FINAL_CLTV,
1316         });
1317         hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
1318         hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
1319         let payment_hash_2 = send_along_route(&nodes[1], Route { paths: vec![hops] }, &vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1;
1320
1321         // Claim the rebalances...
1322         fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2);
1323         claim_payment(&nodes[1], &vec!(&nodes[2], &nodes[3], &nodes[1])[..], payment_preimage_1);
1324
1325         // Add a second, duplicate channel between nodes[1] and nodes[3]
1326         let chan_5 = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known());
1327
1328         // Send some payments across both channels
1329         let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
1330         let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
1331         let payment_preimage_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
1332
1333
1334         route_over_limit(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000);
1335         let events = nodes[0].node.get_and_clear_pending_msg_events();
1336         assert_eq!(events.len(), 0);
1337         nodes[0].logger.assert_log_regex("lightning::ln::channelmanager".to_string(), regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap(), 1);
1338
1339         // TODO: Test that routes work again here as we've been notified that the channel is full
1340
1341         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_3);
1342         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_4);
1343         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_5);
1344
1345         // Close down the channels...
1346         close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
1347         close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
1348         close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
1349         close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
1350         close_channel(&nodes[1], &nodes[3], &chan_5.2, chan_5.3, false);
1351 }
1352
1353 #[test]
1354 fn holding_cell_htlc_counting() {
1355         // Tests that HTLCs in the holding cell count towards the pending HTLC limits on outbound HTLCs
1356         // to ensure we don't end up with HTLCs sitting around in our holding cell for several
1357         // commitment dance rounds.
1358         let chanmon_cfgs = create_chanmon_cfgs(3);
1359         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1360         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1361         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1362         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
1363         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
1364         let logger = test_utils::TestLogger::new();
1365
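        // Saturate the outbound HTLC limit from nodes[1] to nodes[2]; only the first HTLC makes it into a
        // commitment transaction, the rest sit in the holding cell awaiting nodes[2]'s RAA.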
1366         let mut payments = Vec::new();
1367         for _ in 0..::ln::channel::OUR_MAX_HTLCS {
1368                 let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]);
1369                 let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
1370                 let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100000, TEST_FINAL_CLTV, &logger).unwrap();
1371                 nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
1372                 payments.push((payment_preimage, payment_hash));
1373         }
1374         check_added_monitors!(nodes[1], 1);
1375
1376         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
1377         assert_eq!(events.len(), 1);
1378         let initial_payment_event = SendEvent::from_event(events.pop().unwrap());
1379         assert_eq!(initial_payment_event.node_id, nodes[2].node.get_our_node_id());
1380
1381         // There is now one HTLC in an outbound commitment transaction and (OUR_MAX_HTLCS - 1) HTLCs in
1382         // the holding cell waiting on B's RAA to send. At this point we should not be able to add
1383         // another HTLC.
1384         let (_, payment_hash_1, payment_secret_1) = get_payment_preimage_hash!(nodes[2]);
1385         {
1386                 let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
1387                 let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100000, TEST_FINAL_CLTV, &logger).unwrap();
1388                 unwrap_send_err!(nodes[1].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)), true, APIError::ChannelUnavailable { ref err },
1389                         assert!(regex::Regex::new(r"Cannot push more than their max accepted HTLCs \(\d+\)").unwrap().is_match(err)));
1390                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1391                 nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot push more than their max accepted HTLCs".to_string(), 1);
1392         }
1393
1394         // This should also be true if we try to forward a payment.
1395         let (_, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[2]);
1396         {
1397                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
1398                 let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100000, TEST_FINAL_CLTV, &logger).unwrap();
1399                 nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
1400                 check_added_monitors!(nodes[0], 1);
1401         }
1402
1403         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1404         assert_eq!(events.len(), 1);
1405         let payment_event = SendEvent::from_event(events.pop().unwrap());
1406         assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
1407
1408         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1409         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
1410         // We have to process the pending HTLC forwards twice - the first attempt tries to forward the payment
1411         // onward (and fails), the second processes the resulting failure and fails the HTLC backwards.
1412         expect_pending_htlcs_forwardable!(nodes[1]);
1413         expect_pending_htlcs_forwardable!(nodes[1]);
1414         check_added_monitors!(nodes[1], 1);
1415
1416         let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1417         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_updates.update_fail_htlcs[0]);
1418         commitment_signed_dance!(nodes[0], nodes[1], bs_fail_updates.commitment_signed, false, true);
1419
1420         expect_payment_failure_chan_update!(nodes[0], chan_2.0.contents.short_channel_id, false);
1421         expect_payment_failed!(nodes[0], payment_hash_2, false);
1422
1423         // Now forward all the pending HTLCs and claim them back
1424         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &initial_payment_event.msgs[0]);
1425         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &initial_payment_event.commitment_msg);
1426         check_added_monitors!(nodes[2], 1);
1427
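        // Walk through the commitment_signed/revoke_and_ack rounds by hand so that nodes[1] releases the
        // holding-cell HTLCs to nodes[2].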
1428         let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1429         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
1430         check_added_monitors!(nodes[1], 1);
1431         let as_updates = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
1432
1433         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_commitment_signed);
1434         check_added_monitors!(nodes[1], 1);
1435         let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
1436
1437         for ref update in as_updates.update_add_htlcs.iter() {
1438                 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), update);
1439         }
1440         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_updates.commitment_signed);
1441         check_added_monitors!(nodes[2], 1);
1442         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
1443         check_added_monitors!(nodes[2], 1);
1444         let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1445
1446         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
1447         check_added_monitors!(nodes[1], 1);
1448         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_commitment_signed);
1449         check_added_monitors!(nodes[1], 1);
1450         let as_final_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
1451
1452         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_final_raa);
1453         check_added_monitors!(nodes[2], 1);
1454
1455         expect_pending_htlcs_forwardable!(nodes[2]);
1456
1457         let events = nodes[2].node.get_and_clear_pending_events();
1458         assert_eq!(events.len(), payments.len());
1459         for (event, &(_, ref hash)) in events.iter().zip(payments.iter()) {
1460                 match event {
1461                         &Event::PaymentReceived { ref payment_hash, .. } => {
1462                                 assert_eq!(*payment_hash, *hash);
1463                         },
1464                         _ => panic!("Unexpected event"),
1465                 };
1466         }
1467
1468         for (preimage, _) in payments.drain(..) {
1469                 claim_payment(&nodes[1], &[&nodes[2]], preimage);
1470         }
1471
1472         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
1473 }
1474
1475 #[test]
1476 fn duplicate_htlc_test() {
1477         // Test that we accept duplicate payment_hash HTLCs across the network and that
1478         // claiming/failing each one is handled separately and doesn't affect the others
1479         let chanmon_cfgs = create_chanmon_cfgs(6);
1480         let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
1481         let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs, &[None, None, None, None, None, None]);
1482         let mut nodes = create_network(6, &node_cfgs, &node_chanmgrs);
1483
1484         // Create some initial channels to route via 3 to 4/5 from 0/1/2
1485         create_announced_chan_between_nodes(&nodes, 0, 3, InitFeatures::known(), InitFeatures::known());
1486         create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known());
1487         create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known());
1488         create_announced_chan_between_nodes(&nodes, 3, 4, InitFeatures::known(), InitFeatures::known());
1489         create_announced_chan_between_nodes(&nodes, 3, 5, InitFeatures::known(), InitFeatures::known());
1490
1491         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], 1000000);
1492
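        // Rewind the test harness's payment counter so the next route_payment re-uses the same preimage and hash.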
1493         *nodes[0].network_payment_count.borrow_mut() -= 1;
1494         assert_eq!(route_payment(&nodes[1], &vec!(&nodes[3])[..], 1000000).0, payment_preimage);
1495
1496         *nodes[0].network_payment_count.borrow_mut() -= 1;
1497         assert_eq!(route_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], 1000000).0, payment_preimage);
1498
1499         claim_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], payment_preimage);
1500         fail_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], payment_hash);
1501         claim_payment(&nodes[1], &vec!(&nodes[3])[..], payment_preimage);
1502 }
1503
1504 #[test]
1505 fn test_duplicate_htlc_different_direction_onchain() {
1506         // Test that ChannelMonitor doesn't generate 2 preimage txn
1507         // when we have 2 HTLCs with the same preimage that go across a node
1508         // in opposite directions, even with the same payment secret.
1509         let chanmon_cfgs = create_chanmon_cfgs(2);
1510         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1511         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1512         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1513
1514         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
1515         let logger = test_utils::TestLogger::new();
1516
1517         // Rebalance the channel a bit
1518         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
1519
1520         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 900_000);
1521
1522         let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
1523         let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 800_000, TEST_FINAL_CLTV, &logger).unwrap();
1524         let node_a_payment_secret = nodes[0].node.create_inbound_payment_for_hash(payment_hash, None, 7200, 0).unwrap();
1525         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[0]]], 800_000, payment_hash, node_a_payment_secret);
1526
1527         // Provide preimage to node 0 by claiming payment
1528         nodes[0].node.claim_funds(payment_preimage);
1529         check_added_monitors!(nodes[0], 1);
1530
1531         // Get nodes[1]'s commitment txn (one of them is mined on nodes[0]'s chain below)
1532         let remote_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
1533
1534         assert_eq!(remote_txn[0].output.len(), 4); // 1 local, 1 remote, 1 htlc inbound, 1 htlc outbound
1535         let mut has_both_htlcs = 0; // check htlcs match ones committed
1536         for outp in remote_txn[0].output.iter() {
1537                 if outp.value == 800_000 / 1000 {
1538                         has_both_htlcs += 1;
1539                 } else if outp.value == 900_000 / 1000 {
1540                         has_both_htlcs += 1;
1541                 }
1542         }
1543         assert_eq!(has_both_htlcs, 2);
1544
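        // Mine nodes[1]'s commitment transaction on nodes[0]'s chain, then connect blocks until the HTLC would time out.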
1545         mine_transaction(&nodes[0], &remote_txn[0]);
1546         check_added_monitors!(nodes[0], 1);
1547         connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
1548
1549         // Check we only broadcast 1 timeout tx
1550         let claim_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
1551         assert_eq!(claim_txn.len(), 8);
1552         assert_eq!(claim_txn[1], claim_txn[4]);
1553         assert_eq!(claim_txn[2], claim_txn[5]);
1554         check_spends!(claim_txn[1], chan_1.3);
1555         check_spends!(claim_txn[2], claim_txn[1]);
1556         check_spends!(claim_txn[7], claim_txn[1]);
1557
1558         assert_eq!(claim_txn[0].input.len(), 1);
1559         assert_eq!(claim_txn[3].input.len(), 1);
1560         assert_eq!(claim_txn[0].input[0].previous_output, claim_txn[3].input[0].previous_output);
1561
1562         assert_eq!(claim_txn[0].input.len(), 1);
1563         assert_eq!(claim_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC 1 <--> 0, preimage tx
1564         check_spends!(claim_txn[0], remote_txn[0]);
1565         assert_eq!(remote_txn[0].output[claim_txn[0].input[0].previous_output.vout as usize].value, 800);
1566         assert_eq!(claim_txn[6].input.len(), 1);
1567         assert_eq!(claim_txn[6].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // HTLC 0 <--> 1, timeout tx
1568         check_spends!(claim_txn[6], remote_txn[0]);
1569         assert_eq!(remote_txn[0].output[claim_txn[6].input[0].previous_output.vout as usize].value, 900);
1570
1571         let events = nodes[0].node.get_and_clear_pending_msg_events();
1572         assert_eq!(events.len(), 3);
1573         for e in events {
1574                 match e {
1575                         MessageSendEvent::BroadcastChannelUpdate { .. } => {},
1576                         MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
1577                                 assert_eq!(node_id, nodes[1].node.get_our_node_id());
1578                                 assert_eq!(msg.data, "Commitment or closing transaction was confirmed on chain.");
1579                         },
1580                         MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
1581                                 assert!(update_add_htlcs.is_empty());
1582                                 assert!(update_fail_htlcs.is_empty());
1583                                 assert_eq!(update_fulfill_htlcs.len(), 1);
1584                                 assert!(update_fail_malformed_htlcs.is_empty());
1585                                 assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
1586                         },
1587                         _ => panic!("Unexpected event"),
1588                 }
1589         }
1590 }
1591
1592 #[test]
1593 fn test_basic_channel_reserve() {
1594         let chanmon_cfgs = create_chanmon_cfgs(2);
1595         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1596         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1597         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1598         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
1599         let logger = test_utils::TestLogger::new();
1600
1601         let chan_stat = get_channel_value_stat!(nodes[0], chan.2);
1602         let channel_reserve = chan_stat.channel_reserve_msat;
1603
1604         // The 2* and +1 are for the fee spike reserve.
1605         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
1606         let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], chan.2), 1 + 1);
1607         let max_can_send = 5000000 - channel_reserve - commit_tx_fee;
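        // Attempting to send even one msat above max_can_send should fail with a ChannelUnavailable error.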
1608         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
1609         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), max_can_send + 1, TEST_FINAL_CLTV, &logger).unwrap();
1610         let err = nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).err().unwrap();
1611         match err {
1612                 PaymentSendFailure::AllFailedRetrySafe(ref fails) => {
1613                         match &fails[0] {
1614                                 &APIError::ChannelUnavailable{ref err} =>
1615                                         assert!(regex::Regex::new(r"Cannot send value that would put our balance under counterparty-announced channel reserve value \(\d+\)").unwrap().is_match(err)),
1616                                 _ => panic!("Unexpected error variant"),
1617                         }
1618                 },
1619                 _ => panic!("Unexpected error variant"),
1620         }
1621         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1622         nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send value that would put our balance under counterparty-announced channel reserve value".to_string(), 1);
1623
1624         send_payment(&nodes[0], &vec![&nodes[1]], max_can_send);
1625 }
1626
1627 #[test]
1628 fn test_fee_spike_violation_fails_htlc() {
1629         let chanmon_cfgs = create_chanmon_cfgs(2);
1630         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1631         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1632         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1633         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
1634
1635         let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 3460001);
1636         // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
1637         let secp_ctx = Secp256k1::new();
1638         let session_priv = SecretKey::from_slice(&[42; 32]).expect("RNG is bad!");
1639
1640         let cur_height = nodes[1].node.best_block.read().unwrap().height() + 1;
1641
1642         let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
1643         let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 3460001, &Some(payment_secret), cur_height, &None).unwrap();
1644         let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash);
1645         let msg = msgs::UpdateAddHTLC {
1646                 channel_id: chan.2,
1647                 htlc_id: 0,
1648                 amount_msat: htlc_msat,
1649                 payment_hash: payment_hash,
1650                 cltv_expiry: htlc_cltv,
1651                 onion_routing_packet: onion_packet,
1652         };
1653
1654         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
1655
1656         // Now manually create the commitment_signed message corresponding to the update_add
1657         // nodes[0] just sent. In the code for construction of this message, "local" refers
1658         // to the sender of the message, and "remote" refers to the receiver.
1659
1660         let feerate_per_kw = get_feerate!(nodes[0], chan.2);
1661
1662         const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
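        // Commitment numbers count down from 2^48 - 1, so this is the number of the very first commitment transaction.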
1663
1664         // Get the EnforcingSigner for each channel, which will be used to (1) get the keys
1665         // needed to sign the new commitment tx and (2) sign the new commitment tx.
1666         let (local_revocation_basepoint, local_htlc_basepoint, local_secret, next_local_point) = {
1667                 let chan_lock = nodes[0].node.channel_state.lock().unwrap();
1668                 let local_chan = chan_lock.by_id.get(&chan.2).unwrap();
1669                 let chan_signer = local_chan.get_signer();
1670                 let pubkeys = chan_signer.pubkeys();
1671                 (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
1672                  chan_signer.release_commitment_secret(INITIAL_COMMITMENT_NUMBER),
1673                  chan_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx))
1674         };
1675         let (remote_delayed_payment_basepoint, remote_htlc_basepoint,remote_point) = {
1676                 let chan_lock = nodes[1].node.channel_state.lock().unwrap();
1677                 let remote_chan = chan_lock.by_id.get(&chan.2).unwrap();
1678                 let chan_signer = remote_chan.get_signer();
1679                 let pubkeys = chan_signer.pubkeys();
1680                 (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
1681                  chan_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx))
1682         };
1683
1684         // Assemble the set of keys we can use for signatures for our commitment_signed message.
1685         let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &remote_point, &remote_delayed_payment_basepoint,
1686                 &remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint).unwrap();
1687
1688         // Build the remote commitment transaction so we can sign it, and then later use the
1689         // signature for the commitment_signed message.
1690         let local_chan_balance = 1313;
1691
1692         let accepted_htlc_info = chan_utils::HTLCOutputInCommitment {
1693                 offered: false,
1694                 amount_msat: 3460001,
1695                 cltv_expiry: htlc_cltv,
1696                 payment_hash,
1697                 transaction_output_index: Some(1),
1698         };
1699
1700         let commitment_number = INITIAL_COMMITMENT_NUMBER - 1;
1701
1702         let res = {
1703                 let local_chan_lock = nodes[0].node.channel_state.lock().unwrap();
1704                 let local_chan = local_chan_lock.by_id.get(&chan.2).unwrap();
1705                 let local_chan_signer = local_chan.get_signer();
1706                 let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
1707                         commitment_number,
1708                         95000,
1709                         local_chan_balance,
1710                         commit_tx_keys.clone(),
1711                         feerate_per_kw,
1712                         &mut vec![(accepted_htlc_info, ())],
1713                         &local_chan.channel_transaction_parameters.as_counterparty_broadcastable()
1714                 );
1715                 local_chan_signer.sign_counterparty_commitment(&commitment_tx, &secp_ctx).unwrap()
1716         };
1717
1718         let commit_signed_msg = msgs::CommitmentSigned {
1719                 channel_id: chan.2,
1720                 signature: res.0,
1721                 htlc_signatures: res.1
1722         };
1723
1724         // Send the commitment_signed message to nodes[1].
1725         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg);
1726         let _ = nodes[1].node.get_and_clear_pending_msg_events();
1727
1728         // Send the RAA to nodes[1].
1729         let raa_msg = msgs::RevokeAndACK {
1730                 channel_id: chan.2,
1731                 per_commitment_secret: local_secret,
1732                 next_per_commitment_point: next_local_point
1733         };
1734         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa_msg);
1735
1736         let events = nodes[1].node.get_and_clear_pending_msg_events();
1737         assert_eq!(events.len(), 1);
1738         // Make sure the HTLC failed in the way we expect.
1739         match events[0] {
1740                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fail_htlcs, .. }, .. } => {
1741                         assert_eq!(update_fail_htlcs.len(), 1);
1742                         update_fail_htlcs[0].clone()
1743                 },
1744                 _ => panic!("Unexpected event"),
1745         };
1746         nodes[1].logger.assert_log("lightning::ln::channel".to_string(),
1747                 format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", ::hex::encode(raa_msg.channel_id)), 1);
1748
1749         check_added_monitors!(nodes[1], 2);
1750 }
1751
1752 #[test]
1753 fn test_chan_reserve_violation_outbound_htlc_inbound_chan() {
1754         let mut chanmon_cfgs = create_chanmon_cfgs(2);
1755         // Set the fee rate for the channel very high, to the point where the fundee
1756         // sending any above-dust amount would result in a channel reserve violation.
1757         // In this test we check that we would be prevented from sending an HTLC in
1758         // this situation.
1759         let feerate_per_kw = 253;
1760         chanmon_cfgs[0].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(feerate_per_kw) };
1761         chanmon_cfgs[1].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(feerate_per_kw) };
1762         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1763         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1764         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1765
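        // Push almost the entire channel value to nodes[1], leaving nodes[0] with only its channel reserve
        // plus the commitment fee budget for a single HTLC.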
1766         let mut push_amt = 100_000_000;
1767         push_amt -= feerate_per_kw as u64 * (COMMITMENT_TX_BASE_WEIGHT + COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000;
1768         push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000) * 1000;
1769
1770         let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt, InitFeatures::known(), InitFeatures::known());
1771
1772         // Sending exactly enough to hit the reserve amount should be accepted
1773         let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
1774
1775         // However, one more HTLC should put us significantly over the reserve amount and should fail.
1776         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1_000_000);
1777         unwrap_send_err!(nodes[1].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err },
1778                 assert_eq!(err, "Cannot send value that would put counterparty balance under holder-announced channel reserve value"));
1779         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1780         nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot send value that would put counterparty balance under holder-announced channel reserve value".to_string(), 1);
1781 }
1782
1783 #[test]
1784 fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
1785         let mut chanmon_cfgs = create_chanmon_cfgs(2);
1786         // Set the fee rate for the channel very high, to the point where the funder
1787         // receiving 1 update_add_htlc would result in them closing the channel due
1788         // to channel reserve violation. This close could also happen if the fee went
1789         // up a more realistic amount, but many HTLCs were outstanding at the time of
1790         // the update_add_htlc.
1791         chanmon_cfgs[0].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(6000) };
1792         chanmon_cfgs[1].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(6000) };
1793         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1794         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1795         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1796         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
1797
1798         let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1000);
1799         // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
1800         let secp_ctx = Secp256k1::new();
1801         let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
1802         let cur_height = nodes[1].node.best_block.read().unwrap().height() + 1;
1803         let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
1804         let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 1000, &Some(payment_secret), cur_height, &None).unwrap();
1805         let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash);
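        // The update_add_htlc is crafted by hand (rather than via send_payment) precisely so it
        // skips nodes[1]'s own send-side reserve check; delivering it below exercises nodes[0]'s
        // receive-side check, which should reject it and close the channel.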
1806         let msg = msgs::UpdateAddHTLC {
1807                 channel_id: chan.2,
1808                 htlc_id: 1,
1809                 amount_msat: htlc_msat + 1,
1810                 payment_hash: payment_hash,
1811                 cltv_expiry: htlc_cltv,
1812                 onion_routing_packet: onion_packet,
1813         };
1814
1815         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &msg);
1816         // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
1817         nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string(), 1);
1818         assert_eq!(nodes[0].node.list_channels().len(), 0);
1819         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
1820         assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value");
1821         check_added_monitors!(nodes[0], 1);
1822 }
1823
1824 #[test]
1825 fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() {
1826         // Test that if we receive many dust HTLCs over an outbound channel, they don't count when
1827         // calculating our commitment transaction fee (this was previously broken).
1828         let mut chanmon_cfgs = create_chanmon_cfgs(2);
1829         let feerate_per_kw = 253;
1830         chanmon_cfgs[0].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(feerate_per_kw) };
1831         chanmon_cfgs[1].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(feerate_per_kw) };
1832
1833         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1834         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1835         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1836
1837         // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
1838         // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment
1839         // transaction fee with 0 HTLCs (183 sats)).
1840         let mut push_amt = 100_000_000;
1841         push_amt -= feerate_per_kw as u64 * (COMMITMENT_TX_BASE_WEIGHT) / 1000 * 1000;
1842         push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000) * 1000;
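        // With the 253 sat/kW feerate this matches the figures quoted above: nodes[0] keeps only
        // the 183 sat commitment fee (base weight, no HTLC outputs) plus its 1000 sat reserve.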
1843         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt, InitFeatures::known(), InitFeatures::known());
1844
1845         let dust_amt = crate::ln::channel::MIN_DUST_LIMIT_SATOSHIS * 1000
1846                 + feerate_per_kw as u64 * HTLC_SUCCESS_TX_WEIGHT / 1000 * 1000 - 1;
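        // dust_amt is the largest value nodes[0] still treats as dust on this channel: one msat
        // short of its dust limit plus the HTLC-success fee at this feerate, so the HTLC never
        // materializes as an output on the commitment transaction.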
1847         // In the previous code, routing this dust payment would cause nodes[0] to perceive a channel
1848         // reserve violation even though it's a dust HTLC and therefore shouldn't count towards the
1849         // commitment transaction fee.
1850         let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], dust_amt);
1851
1852         // One more than the dust amt should fail, however.
1853         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], dust_amt + 1);
1854         unwrap_send_err!(nodes[1].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err },
1855                 assert_eq!(err, "Cannot send value that would put counterparty balance under holder-announced channel reserve value"));
1856 }
1857
1858 #[test]
1859 fn test_chan_reserve_dust_inbound_htlcs_inbound_chan() {
1860         // Test that if we receive many dust HTLCs over an inbound channel, they don't count when
1861         // calculating our counterparty's commitment transaction fee (this was previously broken).
1862         let chanmon_cfgs = create_chanmon_cfgs(2);
1863         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1864         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1865         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1866         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 98000000, InitFeatures::known(), InitFeatures::known());
1867
1868         let payment_amt = 46000; // Dust amount
1869         // In the previous code, these first four payments would succeed.
1870         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1871         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1872         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1873         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1874
1875         // Then these next 5 would be interpreted by nodes[1] as violating the fee spike buffer.
1876         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1877         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1878         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1879         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1880         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1881
1882         // And this last payment previously resulted in nodes[1] closing on its inbound-channel
1883         // counterparty, because it counted all the previous dust HTLCs against nodes[0]'s commitment
1884         // transaction fee and therefore perceived this next payment as a channel reserve violation.
1885         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1886 }
1887
1888 #[test]
1889 fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
1890         let chanmon_cfgs = create_chanmon_cfgs(3);
1891         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1892         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1893         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1894         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
1895         let _ = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
1896
1897         let feemsat = 239;
1898         let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat;
1899         let chan_stat = get_channel_value_stat!(nodes[0], chan.2);
1900         let feerate = get_feerate!(nodes[0], chan.2);
1901
1902         // Add a 2* and +1 for the fee spike reserve.
1903         let commit_tx_fee_2_htlc = 2*commit_tx_fee_msat(feerate, 2 + 1);
1904         let recv_value_1 = (chan_stat.value_to_self_msat - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlc)/2;
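        // recv_value_1 takes half of what nodes[0] can spend once its reserve, the routing fee,
        // and the fee-spike-buffered commitment fee are set aside; the hand-crafted second HTLC
        // below then aims just past what remains, tripping nodes[1]'s receive-side reserve check.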
1905         let amt_msat_1 = recv_value_1 + total_routing_fee_msat;
1906
1907         // Add a pending HTLC.
1908         let (route_1, our_payment_hash_1, _, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat_1);
1909         let payment_event_1 = {
1910                 nodes[0].node.send_payment(&route_1, our_payment_hash_1, &Some(our_payment_secret_1)).unwrap();
1911                 check_added_monitors!(nodes[0], 1);
1912
1913                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1914                 assert_eq!(events.len(), 1);
1915                 SendEvent::from_event(events.remove(0))
1916         };
1917         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]);
1918
1919         // Attempt to trigger a channel reserve violation --> payment failure.
1920         let commit_tx_fee_2_htlcs = commit_tx_fee_msat(feerate, 2);
1921         let recv_value_2 = chan_stat.value_to_self_msat - amt_msat_1 - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlcs + 1;
1922         let amt_msat_2 = recv_value_2 + total_routing_fee_msat;
1923         let (route_2, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat_2);
1924
1925         // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
1926         let secp_ctx = Secp256k1::new();
1927         let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
1928         let cur_height = nodes[0].node.best_block.read().unwrap().height() + 1;
1929         let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route_2.paths[0], &session_priv).unwrap();
1930         let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route_2.paths[0], recv_value_2, &None, cur_height, &None).unwrap();
1931         let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash_1);
1932         let msg = msgs::UpdateAddHTLC {
1933                 channel_id: chan.2,
1934                 htlc_id: 1,
1935                 amount_msat: htlc_msat + 1,
1936                 payment_hash: our_payment_hash_1,
1937                 cltv_expiry: htlc_cltv,
1938                 onion_routing_packet: onion_packet,
1939         };
1940
1941         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
1942         // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
1943         nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote HTLC add would put them under remote reserve value".to_string(), 1);
1944         assert_eq!(nodes[1].node.list_channels().len(), 1);
1945         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
1946         assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
1947         check_added_monitors!(nodes[1], 1);
1948 }
1949
1950 #[test]
1951 fn test_inbound_outbound_capacity_is_not_zero() {
1952         let chanmon_cfgs = create_chanmon_cfgs(2);
1953         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1954         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1955         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1956         let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
1957         let channels0 = node_chanmgrs[0].list_channels();
1958         let channels1 = node_chanmgrs[1].list_channels();
1959         assert_eq!(channels0.len(), 1);
1960         assert_eq!(channels1.len(), 1);
1961
1962         let reserve = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100000);
1963         assert_eq!(channels0[0].inbound_capacity_msat, 95000000 - reserve*1000);
1964         assert_eq!(channels1[0].outbound_capacity_msat, 95000000 - reserve*1000);
1965
1966         assert_eq!(channels0[0].outbound_capacity_msat, 100000 * 1000 - 95000000 - reserve*1000);
1967         assert_eq!(channels1[0].inbound_capacity_msat, 100000 * 1000 - 95000000 - reserve*1000);
1968 }
1969
1970 fn commit_tx_fee_msat(feerate: u32, num_htlcs: u64) -> u64 {
1971         (COMMITMENT_TX_BASE_WEIGHT + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate as u64 / 1000 * 1000
1972 }
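
// A worked example of the helper above (illustrative only, not part of the original suite;
// it assumes the BOLT 3 commitment weights of 724 base + 172 per-HTLC that the
// COMMITMENT_TX_BASE_WEIGHT/COMMITMENT_TX_WEIGHT_PER_HTLC constants encode).
#[test]
fn commit_tx_fee_msat_worked_example() {
        // At the 253 sat/kW feerate used by the dust tests above, an HTLC-less commitment
        // costs 724 * 253 / 1000 * 1000 = 183_000 msat (the "183 sats" quoted in
        // test_chan_reserve_dust_inbound_htlcs_outbound_chan), with the division rounding
        // the fee down to whole satoshis.
        assert_eq!(commit_tx_fee_msat(253, 0), 183_000);
        // Each above-dust HTLC output adds 172 weight, so one HTLC raises the fee to
        // (724 + 172) * 253 / 1000 * 1000 = 226_000 msat.
        assert_eq!(commit_tx_fee_msat(253, 1), 226_000);
}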
1973
1974 #[test]
1975 fn test_channel_reserve_holding_cell_htlcs() {
1976         let chanmon_cfgs = create_chanmon_cfgs(3);
1977         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1978         // When this test was written, the default base fee floated based on the HTLC count.
1979         // It is now fixed, so we simply set the fee to the expected value here.
1980         let mut config = test_default_channel_config();
1981         config.channel_options.forwarding_fee_base_msat = 239;
1982         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]);
1983         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1984         let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 190000, 1001, InitFeatures::known(), InitFeatures::known());
1985         let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 190000, 1001, InitFeatures::known(), InitFeatures::known());
1986
1987         let mut stat01 = get_channel_value_stat!(nodes[0], chan_1.2);
1988         let mut stat11 = get_channel_value_stat!(nodes[1], chan_1.2);
1989
1990         let mut stat12 = get_channel_value_stat!(nodes[1], chan_2.2);
1991         let mut stat22 = get_channel_value_stat!(nodes[2], chan_2.2);
1992
1993         macro_rules! expect_forward {
1994                 ($node: expr) => {{
1995                         let mut events = $node.node.get_and_clear_pending_msg_events();
1996                         assert_eq!(events.len(), 1);
1997                         check_added_monitors!($node, 1);
1998                         let payment_event = SendEvent::from_event(events.remove(0));
1999                         payment_event
2000                 }}
2001         }
2002
2003         let feemsat = 239; // set above
2004         let total_fee_msat = (nodes.len() - 2) as u64 * feemsat;
2005         let feerate = get_feerate!(nodes[0], chan_1.2);
2006
2007         let recv_value_0 = stat01.counterparty_max_htlc_value_in_flight_msat - total_fee_msat;
2008
2009         // attempt to send amt_msat > their_max_htlc_value_in_flight_msat
2010         {
2011                 let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_0);
2012                 route.paths[0].last_mut().unwrap().fee_msat += 1;
2013                 assert!(route.paths[0].iter().rev().skip(1).all(|h| h.fee_msat == feemsat));
2014                 unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err },
2015                         assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err)));
2016                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
2017                 nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us over the max HTLC value in flight our peer will accept".to_string(), 1);
2018         }
2019
2020         // channel reserve is bigger than their_max_htlc_value_in_flight_msat so loop to deplete
2021         // nodes[0]'s wealth
2022         loop {
2023                 let amt_msat = recv_value_0 + total_fee_msat;
2024                 // 3 for the 3 HTLCs that will be sent, 2* and +1 for the fee spike reserve.
2025                 // Also, ensure that each payment has enough to be over the dust limit to
2026                 // ensure it'll be included in each commit tx fee calculation.
2027                 let commit_tx_fee_all_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1);
2028                 let ensure_htlc_amounts_above_dust_buffer = 3 * (stat01.counterparty_dust_limit_msat + 1000);
2029                 if stat01.value_to_self_msat < stat01.channel_reserve_msat + commit_tx_fee_all_htlcs + ensure_htlc_amounts_above_dust_buffer + amt_msat {
2030                         break;
2031                 }
2032                 send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_0);
2033
2034                 let (stat01_, stat11_, stat12_, stat22_) = (
2035                         get_channel_value_stat!(nodes[0], chan_1.2),
2036                         get_channel_value_stat!(nodes[1], chan_1.2),
2037                         get_channel_value_stat!(nodes[1], chan_2.2),
2038                         get_channel_value_stat!(nodes[2], chan_2.2),
2039                 );
2040
2041                 assert_eq!(stat01_.value_to_self_msat, stat01.value_to_self_msat - amt_msat);
2042                 assert_eq!(stat11_.value_to_self_msat, stat11.value_to_self_msat + amt_msat);
2043                 assert_eq!(stat12_.value_to_self_msat, stat12.value_to_self_msat - (amt_msat - feemsat));
2044                 assert_eq!(stat22_.value_to_self_msat, stat22.value_to_self_msat + (amt_msat - feemsat));
2045                 stat01 = stat01_; stat11 = stat11_; stat12 = stat12_; stat22 = stat22_;
2046         }
2047
2048         // Add a pending HTLC output.
2049         // 2* and +1 HTLCs on the commit tx fee for the fee spike reserve.
2050         // The reason we're dividing by two here is as follows: the dividend is the total outbound liquidity
2051         // after fees, the channel reserve, and the fee spike buffer are removed. We eventually want to
2052         // divide this quantity into 3 portions that will each be sent in an HTLC. This allows us
2053         // to test channel reserve policy at the edges of what amount is sendable, i.e.
2054         // cases where 1 msat over X amount will cause a payment failure, but anything less than
2055         // that can be sent successfully. So, dividing by two is a somewhat arbitrary way of getting
2056         // the amount of the first of these aforementioned 3 payments. The reason we split into 3 payments
2057         // is to test the behavior of the holding cell with respect to channel reserve and commit tx fee
2058         // policy.
2059         let commit_tx_fee_2_htlcs = 2*commit_tx_fee_msat(feerate, 2 + 1);
2060         let recv_value_1 = (stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs)/2;
2061         let amt_msat_1 = recv_value_1 + total_fee_msat;
2062
2063         let (route_1, our_payment_hash_1, our_payment_preimage_1, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_1);
2064         let payment_event_1 = {
2065                 nodes[0].node.send_payment(&route_1, our_payment_hash_1, &Some(our_payment_secret_1)).unwrap();
2066                 check_added_monitors!(nodes[0], 1);
2067
2068                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2069                 assert_eq!(events.len(), 1);
2070                 SendEvent::from_event(events.remove(0))
2071         };
2072         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]);
2073
2074         // channel reserve test with htlc pending output > 0
2075         let recv_value_2 = stat01.value_to_self_msat - amt_msat_1 - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs;
2076         {
2077                 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_2 + 1);
2078                 unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err },
2079                         assert!(regex::Regex::new(r"Cannot send value that would put our balance under counterparty-announced channel reserve value \(\d+\)").unwrap().is_match(err)));
2080                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
2081         }
2082
2083         // split the rest to test holding cell
2084         let commit_tx_fee_3_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1);
2085         let additional_htlc_cost_msat = commit_tx_fee_3_htlcs - commit_tx_fee_2_htlcs;
2086         let recv_value_21 = recv_value_2/2 - additional_htlc_cost_msat/2;
2087         let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat - additional_htlc_cost_msat;
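        // recv_value_22 is what is left of recv_value_2 after the first half, a second round of
        // routing fees, and the extra per-HTLC commitment fee; together the two payments should
        // park nodes[0] exactly at its channel reserve, as asserted just below.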
2088         {
2089                 let stat = get_channel_value_stat!(nodes[0], chan_1.2);
2090                 assert_eq!(stat.value_to_self_msat - (stat.pending_outbound_htlcs_amount_msat + recv_value_21 + recv_value_22 + total_fee_msat + total_fee_msat + commit_tx_fee_3_htlcs), stat.channel_reserve_msat);
2091         }
2092
2093         // now see if they go through on both sides
2094         let (route_21, our_payment_hash_21, our_payment_preimage_21, our_payment_secret_21) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_21);
2095         // but this one will get stuck in the holding cell
2096         nodes[0].node.send_payment(&route_21, our_payment_hash_21, &Some(our_payment_secret_21)).unwrap();
2097         check_added_monitors!(nodes[0], 0);
2098         let events = nodes[0].node.get_and_clear_pending_events();
2099         assert_eq!(events.len(), 0);
2100
2101         // test with outbound holding cell amount > 0
2102         {
2103                 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22+1);
2104                 unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err },
2105                         assert!(regex::Regex::new(r"Cannot send value that would put our balance under counterparty-announced channel reserve value \(\d+\)").unwrap().is_match(err)));
2106                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
2107                 nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send value that would put our balance under counterparty-announced channel reserve value".to_string(), 2);
2108         }
2109
2110         let (route_22, our_payment_hash_22, our_payment_preimage_22, our_payment_secret_22) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22);
2111         // this one will also get stuck in the holding cell
2112         nodes[0].node.send_payment(&route_22, our_payment_hash_22, &Some(our_payment_secret_22)).unwrap();
2113         check_added_monitors!(nodes[0], 0);
2114         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
2115         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
2116
2117         // flush the pending htlc
2118         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event_1.commitment_msg);
2119         let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2120         check_added_monitors!(nodes[1], 1);
2121
2122         // the pending htlc should be promoted to committed
2123         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack);
2124         check_added_monitors!(nodes[0], 1);
2125         let commitment_update_2 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2126
2127         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_commitment_signed);
2128         let bs_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2129         // No commitment_signed so get_event_msg's assert(len == 1) passes
2130         check_added_monitors!(nodes[0], 1);
2131
2132         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_revoke_and_ack);
2133         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2134         check_added_monitors!(nodes[1], 1);
2135
2136         expect_pending_htlcs_forwardable!(nodes[1]);
2137
2138         let ref payment_event_11 = expect_forward!(nodes[1]);
2139         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_11.msgs[0]);
2140         commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false);
2141
2142         expect_pending_htlcs_forwardable!(nodes[2]);
2143         expect_payment_received!(nodes[2], our_payment_hash_1, our_payment_secret_1, recv_value_1);
2144
2145         // flush the htlcs in the holding cell
2146         assert_eq!(commitment_update_2.update_add_htlcs.len(), 2);
2147         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[0]);
2148         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[1]);
2149         commitment_signed_dance!(nodes[1], nodes[0], &commitment_update_2.commitment_signed, false);
2150         expect_pending_htlcs_forwardable!(nodes[1]);
2151
2152         let ref payment_event_3 = expect_forward!(nodes[1]);
2153         assert_eq!(payment_event_3.msgs.len(), 2);
2154         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[0]);
2155         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[1]);
2156
2157         commitment_signed_dance!(nodes[2], nodes[1], &payment_event_3.commitment_msg, false);
2158         expect_pending_htlcs_forwardable!(nodes[2]);
2159
2160         let events = nodes[2].node.get_and_clear_pending_events();
2161         assert_eq!(events.len(), 2);
2162         match events[0] {
2163                 Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
2164                         assert_eq!(our_payment_hash_21, *payment_hash);
2165                         assert_eq!(recv_value_21, amt);
2166                         match &purpose {
2167                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
2168                                         assert!(payment_preimage.is_none());
2169                                         assert_eq!(our_payment_secret_21, *payment_secret);
2170                                 },
2171                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
2172                         }
2173                 },
2174                 _ => panic!("Unexpected event"),
2175         }
2176         match events[1] {
2177                 Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
2178                         assert_eq!(our_payment_hash_22, *payment_hash);
2179                         assert_eq!(recv_value_22, amt);
2180                         match &purpose {
2181                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
2182                                         assert!(payment_preimage.is_none());
2183                                         assert_eq!(our_payment_secret_22, *payment_secret);
2184                                 },
2185                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
2186                         }
2187                 },
2188                 _ => panic!("Unexpected event"),
2189         }
2190
2191         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_1);
2192         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21);
2193         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22);
2194
2195         let commit_tx_fee_0_htlcs = 2*commit_tx_fee_msat(feerate, 1);
2196         let recv_value_3 = commit_tx_fee_2_htlcs - commit_tx_fee_0_htlcs - total_fee_msat;
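        // With every HTLC claimed the commitment is back to zero HTLC outputs, so the fee headroom
        // that was reserved for them (the difference between the 2-HTLC and 0-HTLC figures, less a
        // routing fee) becomes spendable again; the assertions below pin down nodes[0]'s resulting balance.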
2197         send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_3);
2198
2199         let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1);
2200         let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat) - (recv_value_3 + total_fee_msat);
2201         let stat0 = get_channel_value_stat!(nodes[0], chan_1.2);
2202         assert_eq!(stat0.value_to_self_msat, expected_value_to_self);
2203         assert_eq!(stat0.value_to_self_msat, stat0.channel_reserve_msat + commit_tx_fee_1_htlc);
2204
2205         let stat2 = get_channel_value_stat!(nodes[2], chan_2.2);
2206         assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22 + recv_value_3);
2207 }
2208
2209 #[test]
2210 fn channel_reserve_in_flight_removes() {
2211         // In cases where one side claims an HTLC, it thinks it has additional available funds that it
2212         // can send to its counterparty, but due to update ordering, the other side may not yet have
2213         // considered those HTLCs fully removed.
2214         // This tests that we don't count HTLCs which will not be included in the next remote
2215         // commitment transaction towards the reserve value (as it implies no commitment transaction
2216         // will be generated which violates the remote reserve value).
2217         // This was broken previously, and discovered by the chanmon_fail_consistency fuzz test.
2218         // To test this we:
2219         //  * route two HTLCs from A to B (note that, at a high level, this test is checking that, when
2220         //    you consider the values of both of these HTLCs, B may not send an HTLC back to A, but if
2221         //    you only consider the value of the first HTLC, it may not),
2222         //  * start routing a third HTLC from A to B,
2223         //  * claim the first two HTLCs (though B will generate an update_fulfill for one, and put
2224         //    the other claim in its holding cell, as it immediately goes into AwaitingRAA),
2225         //  * deliver the first fulfill from B
2226         //  * deliver the update_add and an RAA from A, resulting in B freeing the second holding cell
2227         //    claim,
2228         //  * deliver A's response CS and RAA.
2229         //    This results in A having the second HTLC in AwaitingRemovedRemoteRevoke, but B having
2230         //    removed it fully. B now has the push_msat plus the first two HTLCs in value.
2231         //  * Now B happily sends another HTLC, potentially violating its reserve value from A's point
2232         //    of view (if A counts the AwaitingRemovedRemoteRevoke HTLC).
2233         let chanmon_cfgs = create_chanmon_cfgs(2);
2234         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2235         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2236         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2237         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
2238         let logger = test_utils::TestLogger::new();
2239
2240         let b_chan_values = get_channel_value_stat!(nodes[1], chan_1.2);
2241         // Route the first two HTLCs.
2242         let (payment_preimage_1, _, _) = route_payment(&nodes[0], &[&nodes[1]], b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000);
2243         let (payment_preimage_2, _, _) = route_payment(&nodes[0], &[&nodes[1]], 20000);
2244
2245         // Start routing the third HTLC (this is just used to get everyone in the right state).
2246         let (payment_preimage_3, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[1]);
2247         let send_1 = {
2248                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
2249                 let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
2250                 nodes[0].node.send_payment(&route, payment_hash_3, &Some(payment_secret_3)).unwrap();
2251                 check_added_monitors!(nodes[0], 1);
2252                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2253                 assert_eq!(events.len(), 1);
2254                 SendEvent::from_event(events.remove(0))
2255         };
2256
2257         // Now claim both of the first two HTLCs on B's end, putting B in AwaitingRAA and generating an
2258         // initial fulfill/CS.
2259         assert!(nodes[1].node.claim_funds(payment_preimage_1));
2260         check_added_monitors!(nodes[1], 1);
2261         let bs_removes = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2262
2263         // This claim goes in B's holding cell, allowing us to have a pending B->A RAA which does not
2264         // remove the second HTLC when we send the HTLC back from B to A.
2265         assert!(nodes[1].node.claim_funds(payment_preimage_2));
2266         check_added_monitors!(nodes[1], 1);
2267         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2268
2269         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_removes.update_fulfill_htlcs[0]);
2270         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed);
2271         check_added_monitors!(nodes[0], 1);
2272         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2273         expect_payment_sent!(nodes[0], payment_preimage_1);
2274
2275         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_1.msgs[0]);
2276         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_1.commitment_msg);
2277         check_added_monitors!(nodes[1], 1);
2278         // B is already AwaitingRAA, so it can't generate a CS here
2279         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2280
2281         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2282         check_added_monitors!(nodes[1], 1);
2283         let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2284
2285         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2286         check_added_monitors!(nodes[0], 1);
2287         let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2288
2289         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2290         check_added_monitors!(nodes[1], 1);
2291         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2292
2293         // The second HTLC is removed, but as A is in AwaitingRAA it can't generate a CS here, so the
2294         // RAA that B generated above doesn't fully resolve the second HTLC from A's point of view.
2295         // However, the RAA A generates here *does* fully resolve the HTLC from B's point of view (as A
2296         // can no longer broadcast a commitment transaction with it and B has the preimage so can go
2297         // on-chain as necessary).
2298         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_cs.update_fulfill_htlcs[0]);
2299         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
2300         check_added_monitors!(nodes[0], 1);
2301         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2302         expect_payment_sent!(nodes[0], payment_preimage_2);
2303
2304         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2305         check_added_monitors!(nodes[1], 1);
2306         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2307
2308         expect_pending_htlcs_forwardable!(nodes[1]);
2309         expect_payment_received!(nodes[1], payment_hash_3, payment_secret_3, 100000);
2310
2311         // Note that as this RAA was generated before the delivery of the update_fulfill it shouldn't
2312         // resolve the second HTLC from A's point of view.
2313         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2314         check_added_monitors!(nodes[0], 1);
2315         let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2316
2317         // Now that B doesn't have the second RAA anymore, but A still does, send a payment from B back
2318         // to A to ensure that A doesn't count the almost-removed HTLC in update_add processing.
2319         let (payment_preimage_4, payment_hash_4, payment_secret_4) = get_payment_preimage_hash!(nodes[0]);
2320         let send_2 = {
2321                 let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
2322                 let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 10000, TEST_FINAL_CLTV, &logger).unwrap();
2323                 nodes[1].node.send_payment(&route, payment_hash_4, &Some(payment_secret_4)).unwrap();
2324                 check_added_monitors!(nodes[1], 1);
2325                 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
2326                 assert_eq!(events.len(), 1);
2327                 SendEvent::from_event(events.remove(0))
2328         };
2329
2330         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_2.msgs[0]);
2331         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_2.commitment_msg);
2332         check_added_monitors!(nodes[0], 1);
2333         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2334
2335         // Now just resolve all the outstanding messages/HTLCs for completeness...
2336
2337         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2338         check_added_monitors!(nodes[1], 1);
2339         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2340
2341         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2342         check_added_monitors!(nodes[1], 1);
2343
2344         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2345         check_added_monitors!(nodes[0], 1);
2346         let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2347
2348         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2349         check_added_monitors!(nodes[1], 1);
2350         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2351
2352         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2353         check_added_monitors!(nodes[0], 1);
2354
2355         expect_pending_htlcs_forwardable!(nodes[0]);
2356         expect_payment_received!(nodes[0], payment_hash_4, payment_secret_4, 10000);
2357
2358         claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_4);
2359         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
2360 }
2361
2362 #[test]
2363 fn channel_monitor_network_test() {
2364         // Simple test which builds a network of ChannelManagers, connects them to each other, and
2365         // tests that ChannelMonitor is able to recover from various states.
2366         let chanmon_cfgs = create_chanmon_cfgs(5);
2367         let node_cfgs = create_node_cfgs(5, &chanmon_cfgs);
2368         let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &[None, None, None, None, None]);
2369         let nodes = create_network(5, &node_cfgs, &node_chanmgrs);
2370
2371         // Create some initial channels
2372         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
2373         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
2374         let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known());
2375         let chan_4 = create_announced_chan_between_nodes(&nodes, 3, 4, InitFeatures::known(), InitFeatures::known());
2376
2377         // Make sure all nodes are at the same starting height
2378         connect_blocks(&nodes[0], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
2379         connect_blocks(&nodes[1], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
2380         connect_blocks(&nodes[2], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
2381         connect_blocks(&nodes[3], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[3].best_block_info().1);
2382         connect_blocks(&nodes[4], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[4].best_block_info().1);
2383
2384         // Rebalance the network a bit by relaying one payment through all the channels...
2385         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2386         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2387         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2388         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2389
2390         // Simple case with no pending HTLCs:
2391         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), true);
2392         check_added_monitors!(nodes[1], 1);
2393         check_closed_broadcast!(nodes[1], false);
2394         {
2395                 let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
2396                 assert_eq!(node_txn.len(), 1);
2397                 mine_transaction(&nodes[0], &node_txn[0]);
2398                 check_added_monitors!(nodes[0], 1);
2399                 test_txn_broadcast(&nodes[0], &chan_1, None, HTLCType::NONE);
2400         }
2401         check_closed_broadcast!(nodes[0], true);
2402         assert_eq!(nodes[0].node.list_channels().len(), 0);
2403         assert_eq!(nodes[1].node.list_channels().len(), 1);
2404
2405         // One pending HTLC is discarded by the force-close:
2406         let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 3000000).0;
2407
2408         // Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
2409         // broadcasted until we reach the timelock time).
2410         nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), true);
2411         check_closed_broadcast!(nodes[1], false);
2412         check_added_monitors!(nodes[1], 1);
2413         {
2414                 let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::NONE);
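                // Mine past the HTLC's CLTV expiry (the final CLTV plus one routing hop's
                // cltv_expiry_delta) plus the grace period; nodes[1] should then broadcast an
                // HTLC-Timeout spending its own already-broadcast commitment transaction.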
2415                 connect_blocks(&nodes[1], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1);
2416                 test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
2417                 mine_transaction(&nodes[2], &node_txn[0]);
2418                 check_added_monitors!(nodes[2], 1);
2419                 test_txn_broadcast(&nodes[2], &chan_2, None, HTLCType::NONE);
2420         }
2421         check_closed_broadcast!(nodes[2], true);
2422         assert_eq!(nodes[1].node.list_channels().len(), 0);
2423         assert_eq!(nodes[2].node.list_channels().len(), 1);
2424
2425         macro_rules! claim_funds {
2426                 ($node: expr, $prev_node: expr, $preimage: expr) => {
2427                         {
2428                                 assert!($node.node.claim_funds($preimage));
2429                                 check_added_monitors!($node, 1);
2430
2431                                 let events = $node.node.get_and_clear_pending_msg_events();
2432                                 assert_eq!(events.len(), 1);
2433                                 match events[0] {
2434                                         MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, .. } } => {
2435                                                 assert!(update_add_htlcs.is_empty());
2436                                                 assert!(update_fail_htlcs.is_empty());
2437                                                 assert_eq!(*node_id, $prev_node.node.get_our_node_id());
2438                                         },
2439                                         _ => panic!("Unexpected event"),
2440                                 };
2441                         }
2442                 }
2443         }
2444
2445         // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
2446         // HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
2447         nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), true);
2448         check_added_monitors!(nodes[2], 1);
2449         check_closed_broadcast!(nodes[2], false);
2450         let node2_commitment_txid;
2451         {
2452                 let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::NONE);
2453                 connect_blocks(&nodes[2], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1);
2454                 test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT);
2455                 node2_commitment_txid = node_txn[0].txid();
2456
2457                 // Claim the payment on nodes[3], giving it knowledge of the preimage
2458                 claim_funds!(nodes[3], nodes[2], payment_preimage_1);
2459                 mine_transaction(&nodes[3], &node_txn[0]);
2460                 check_added_monitors!(nodes[3], 1);
2461                 check_preimage_claim(&nodes[3], &node_txn);
2462         }
2463         check_closed_broadcast!(nodes[3], true);
2464         assert_eq!(nodes[2].node.list_channels().len(), 0);
2465         assert_eq!(nodes[3].node.list_channels().len(), 1);
2466
2467         // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
2468         // confusing us in the following tests.
2469         let chan_3_mon = nodes[3].chain_monitor.chain_monitor.monitors.write().unwrap().remove(&OutPoint { txid: chan_3.3.txid(), index: 0 }).unwrap();
2470
2471         // One pending HTLC to time out:
2472         let payment_preimage_2 = route_payment(&nodes[3], &vec!(&nodes[4])[..], 3000000).0;
2473         // CLTV expires at TEST_FINAL_CLTV + 1 (current height) + 1 (added in send_payment for
2474         // buffer space).
2475
2476         let (close_chan_update_1, close_chan_update_2) = {
2477                 connect_blocks(&nodes[3], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
2478                 let events = nodes[3].node.get_and_clear_pending_msg_events();
2479                 assert_eq!(events.len(), 2);
2480                 let close_chan_update_1 = match events[0] {
2481                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
2482                                 msg.clone()
2483                         },
2484                         _ => panic!("Unexpected event"),
2485                 };
2486                 match events[1] {
2487                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id } => {
2488                                 assert_eq!(node_id, nodes[4].node.get_our_node_id());
2489                         },
2490                         _ => panic!("Unexpected event"),
2491                 }
2492                 check_added_monitors!(nodes[3], 1);
2493
2494                 // Clear bumped claiming txn spending node 2's commitment tx. Bumped txn are generated once a height-based bump timer is reached.
2495                 {
2496                         let mut node_txn = nodes[3].tx_broadcaster.txn_broadcasted.lock().unwrap();
2497                         node_txn.retain(|tx| tx.input[0].previous_output.txid != node2_commitment_txid);
2502                 }
2503
2504                 let node_txn = test_txn_broadcast(&nodes[3], &chan_4, None, HTLCType::TIMEOUT);
2505
2506                 // Claim the payment on nodes[4], giving it knowledge of the preimage
2507                 claim_funds!(nodes[4], nodes[3], payment_preimage_2);
2508
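                // nodes[4] now knows the preimage; connecting blocks until the HTLC is close to
                // expiring should push it to give up on an off-chain resolution and claim on
                // chain instead, via the HTLC-Success broadcast checked below.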
2509                 connect_blocks(&nodes[4], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2);
2510                 let events = nodes[4].node.get_and_clear_pending_msg_events();
2511                 assert_eq!(events.len(), 2);
2512                 let close_chan_update_2 = match events[0] {
2513                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
2514                                 msg.clone()
2515                         },
2516                         _ => panic!("Unexpected event"),
2517                 };
2518                 match events[1] {
2519                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id } => {
2520                                 assert_eq!(node_id, nodes[3].node.get_our_node_id());
2521                         },
2522                         _ => panic!("Unexpected event"),
2523                 }
2524                 check_added_monitors!(nodes[4], 1);
2525                 test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
2526
2527                 mine_transaction(&nodes[4], &node_txn[0]);
2528                 check_preimage_claim(&nodes[4], &node_txn);
2529                 (close_chan_update_1, close_chan_update_2)
2530         };
2531         nodes[3].net_graph_msg_handler.handle_channel_update(&close_chan_update_2).unwrap();
2532         nodes[4].net_graph_msg_handler.handle_channel_update(&close_chan_update_1).unwrap();
2533         assert_eq!(nodes[3].node.list_channels().len(), 0);
2534         assert_eq!(nodes[4].node.list_channels().len(), 0);
2535
2536         nodes[3].chain_monitor.chain_monitor.monitors.write().unwrap().insert(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon);
2537 }
2538
2539 #[test]
2540 fn test_justice_tx() {
2541         // Test justice txn built on revoked HTLC-Success tx, against both sides
2542         let mut alice_config = UserConfig::default();
2543         alice_config.channel_options.announced_channel = true;
2544         alice_config.peer_channel_config_limits.force_announced_channel_preference = false;
2545         alice_config.own_channel_config.our_to_self_delay = 6 * 24 * 5;
2546         let mut bob_config = UserConfig::default();
2547         bob_config.channel_options.announced_channel = true;
2548         bob_config.peer_channel_config_limits.force_announced_channel_preference = false;
2549         bob_config.own_channel_config.our_to_self_delay = 6 * 24 * 3;
2550         let user_cfgs = [Some(alice_config), Some(bob_config)];
2551         let mut chanmon_cfgs = create_chanmon_cfgs(2);
2552         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2553         chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
2554         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2555         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
2556         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2557         // Create some new channels:
2558         let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
2559
2560         // A pending HTLC which will be revoked:
2561         let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
2562         // Get the will-be-revoked local txn from nodes[0]
2563         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_5.2);
2564         assert_eq!(revoked_local_txn.len(), 2); // First commitment tx, then HTLC tx
2565         assert_eq!(revoked_local_txn[0].input.len(), 1);
2566         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_5.3.txid());
2567         assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to 0 are present
2568         assert_eq!(revoked_local_txn[1].input.len(), 1);
2569         assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
2570         assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout
2571         // Revoke the old state
2572         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
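        // Completing the payment rotates both commitments forward and hands nodes[1] the
        // revocation secret for the old state captured in revoked_local_txn, which is what
        // lets it build the justice transactions below if that state ever hits the chain.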
2573
2574         {
2575                 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2576                 {
2577                         let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
2578                         assert_eq!(node_txn.len(), 2); // ChannelMonitor: penalty tx, ChannelManager: local commitment tx
2579                         assert_eq!(node_txn[0].input.len(), 2); // We should claim the revoked output and the HTLC output
2580
2581                         check_spends!(node_txn[0], revoked_local_txn[0]);
2582                         node_txn.swap_remove(0);
2583                         node_txn.truncate(1);
2584                 }
2585                 check_added_monitors!(nodes[1], 1);
2586                 test_txn_broadcast(&nodes[1], &chan_5, None, HTLCType::NONE);
2587
2588                 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2589                 connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
2590                 // Verify broadcast of revoked HTLC-timeout
2591                 let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
2592                 check_added_monitors!(nodes[0], 1);
2593                 // Broadcast revoked HTLC-timeout on node 1
2594                 mine_transaction(&nodes[1], &node_txn[1]);
2595                 test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone(), revoked_local_txn[0].clone());
2596         }
2597         get_announce_close_broadcast_events(&nodes, 0, 1);
2598
2599         assert_eq!(nodes[0].node.list_channels().len(), 0);
2600         assert_eq!(nodes[1].node.list_channels().len(), 0);
2601
2602         // We test the justice tx built by A on B's revoked HTLC-Success tx
2603         // Create some new channels:
2604         let chan_6 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
2605         {
2606                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
2607                 node_txn.clear();
2608         }
2609
2610         // A pending HTLC which will be revoked:
2611         let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
2612         // Get the will-be-revoked local txn from B
2613         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_6.2);
2614         assert_eq!(revoked_local_txn.len(), 1); // Only commitment tx
2615         assert_eq!(revoked_local_txn[0].input.len(), 1);
2616         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_6.3.txid());
2617         assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to A are present
2618         // Revoke the old state
2619         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_4);
2620         {
2621                 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2622                 {
2623                         let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
2624                         assert_eq!(node_txn.len(), 2); // ChannelMonitor: penalty tx, ChannelManager: local commitment tx
2625                         assert_eq!(node_txn[0].input.len(), 1); // We claim the received HTLC output
2626
2627                         check_spends!(node_txn[0], revoked_local_txn[0]);
2628                         node_txn.swap_remove(0);
2629                 }
2630                 check_added_monitors!(nodes[0], 1);
2631                 test_txn_broadcast(&nodes[0], &chan_6, None, HTLCType::NONE);
2632
2633                 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2634                 let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
2635                 check_added_monitors!(nodes[1], 1);
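                // A should see B's revoked HTLC-Success confirm and sweep its output with a justice tx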
2636                 mine_transaction(&nodes[0], &node_txn[1]);
2637                 test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone());
2638         }
2639         get_announce_close_broadcast_events(&nodes, 0, 1);
2640         assert_eq!(nodes[0].node.list_channels().len(), 0);
2641         assert_eq!(nodes[1].node.list_channels().len(), 0);
2642 }
2643
2644 #[test]
2645 fn revoked_output_claim() {
2646         // Simple test to ensure a node will claim a revoked output when a stale remote commitment
2647         // transaction is broadcast by its counterparty
2648         let chanmon_cfgs = create_chanmon_cfgs(2);
2649         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2650         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2651         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2652         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
2653         // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim the revoked output
2654         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2655         assert_eq!(revoked_local_txn.len(), 1);
2656         // Only output is the full channel value back to nodes[0]:
2657         assert_eq!(revoked_local_txn[0].output.len(), 1);
2658         // Send a payment through, updating everyone's latest commitment txn
2659         send_payment(&nodes[0], &vec!(&nodes[1])[..], 5000000);
2660
2661         // Inform nodes[1] that nodes[0] broadcast a stale tx
2662         mine_transaction(&nodes[1], &revoked_local_txn[0]);
2663         check_added_monitors!(nodes[1], 1);
2664         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
2665         assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx against revoked to_local output, ChannelManager: local commitment tx
2666
2667         check_spends!(node_txn[0], revoked_local_txn[0]);
2668         check_spends!(node_txn[1], chan_1.3);
2669
2670         // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan
2671         mine_transaction(&nodes[0], &revoked_local_txn[0]);
2672         get_announce_close_broadcast_events(&nodes, 0, 1);
2673         check_added_monitors!(nodes[0], 1)
2674 }
2675
2676 #[test]
2677 fn claim_htlc_outputs_shared_tx() {
2678         // Node revoked its old state; the HTLCs haven't timed out yet, so claim them in a shared justice tx
2679         let mut chanmon_cfgs = create_chanmon_cfgs(2);
2680         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2681         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2682         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2683         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2684
2685         // Create some new channel:
2686         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
2687
2688         // Rebalance the network to generate an HTLC in each direction
2689         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
2690         // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim both offered/received HTLC outputs on top of the commitment tx
2691         let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
2692         let (_payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000);
2693
2694         // Get the will-be-revoked local txn from node[0]
2695         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2696         assert_eq!(revoked_local_txn.len(), 2); // commitment tx + 1 HTLC-Timeout tx
2697         assert_eq!(revoked_local_txn[0].input.len(), 1);
2698         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
2699         assert_eq!(revoked_local_txn[1].input.len(), 1);
2700         assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
2701         assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout
2702         check_spends!(revoked_local_txn[1], revoked_local_txn[0]);
2703
2704         // Revoke the old state
2705         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
2706
2707         {
2708                 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2709                 check_added_monitors!(nodes[0], 1);
2710                 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2711                 check_added_monitors!(nodes[1], 1);
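                // nodes[1]'s outbound HTLC is only failed once the revoked commitment has been confirmed for ANTI_REORG_DELAY blocks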
2712                 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2713                 expect_payment_failed!(nodes[1], payment_hash_2, true);
2714
2715                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
2716                 assert_eq!(node_txn.len(), 2); // ChannelMonitor: penalty tx, ChannelManager: local commitment
2717
2718                 assert_eq!(node_txn[0].input.len(), 3); // Claim the revoked output + both revoked HTLC outputs
2719                 check_spends!(node_txn[0], revoked_local_txn[0]);
2720
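                // The three claimed outputs are distinguished by the length of their witness scripts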
2721                 let mut witness_lens = BTreeSet::new();
2722                 witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
2723                 witness_lens.insert(node_txn[0].input[1].witness.last().unwrap().len());
2724                 witness_lens.insert(node_txn[0].input[2].witness.last().unwrap().len());
2725                 assert_eq!(witness_lens.len(), 3);
2726                 assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
2727                 assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC
2728                 assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC
2729
2730                 // Next nodes[1] broadcasts its current local tx state:
2731                 assert_eq!(node_txn[1].input.len(), 1);
2732                 assert_eq!(node_txn[1].input[0].previous_output.txid, chan_1.3.txid()); // Spending the funding tx's unique txout; tx broadcast by ChannelManager
2733         }
2734         get_announce_close_broadcast_events(&nodes, 0, 1);
2735         assert_eq!(nodes[0].node.list_channels().len(), 0);
2736         assert_eq!(nodes[1].node.list_channels().len(), 0);
2737 }
2738
2739 #[test]
2740 fn claim_htlc_outputs_single_tx() {
2741         // Node revoked its old state; the HTLCs have timed out, so claim each of them in a separate justice tx
2742         let mut chanmon_cfgs = create_chanmon_cfgs(2);
2743         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2744         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2745         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2746         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2747
2748         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
2749
2750         // Rebalance the network to generate an HTLC in each direction
2751         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
2752         // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim both offered/received HTLC outputs on top of the commitment tx, but this
2753         // time as two different claim transactions since we're going to time out the HTLCs given a high current height
2754         let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
2755         let (_payment_preimage_2, payment_hash_2, _payment_secret_2) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000);
2756
2757         // Get the will-be-revoked local txn from node[0]
2758         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2759
2760         // Revoke the old state
2761         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
2762
2763         {
2764                 confirm_transaction_at(&nodes[0], &revoked_local_txn[0], 100);
2765                 check_added_monitors!(nodes[0], 1);
2766                 confirm_transaction_at(&nodes[1], &revoked_local_txn[0], 100);
2767                 check_added_monitors!(nodes[1], 1);
2768                 expect_pending_htlcs_forwardable_ignore!(nodes[0]);
2769
2770                 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2771                 expect_payment_failed!(nodes[1], payment_hash_2, true);
2772
2773                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
2774                 assert_eq!(node_txn.len(), 9);
2775                 // ChannelMonitor: justice tx for the revoked offered HTLC, justice tx for the revoked received HTLC, justice tx for the revoked to_local output (3)
2776                 // ChannelManager: local commitment + local HTLC-timeout (2)
2777                 // ChannelMonitor: bumped justice tx; after one increase, bumps on the HTLCs aren't generated as they're no longer substantial, and a bump on the revoked to_local isn't generated as there's more room until expiration (2)
2778                 // ChannelMonitor: local commitment + local HTLC-timeout (2)
2779
2780                 // Check the local commitment/HTLC-timeout pair broadcast due to HTLC expiration
2781                 assert_eq!(node_txn[0].input.len(), 1);
2782                 check_spends!(node_txn[0], chan_1.3);
2783                 assert_eq!(node_txn[1].input.len(), 1);
2784                 let witness_script = node_txn[1].input[0].witness.last().unwrap();
2785                 assert_eq!(witness_script.len(), OFFERED_HTLC_SCRIPT_WEIGHT); // Spending an offered HTLC output
2786                 check_spends!(node_txn[1], node_txn[0]);
2787
2788                 // Justice transactions are at indices 2, 3, and 4
2789                 assert_eq!(node_txn[2].input.len(), 1);
2790                 assert_eq!(node_txn[3].input.len(), 1);
2791                 assert_eq!(node_txn[4].input.len(), 1);
2792
2793                 check_spends!(node_txn[2], revoked_local_txn[0]);
2794                 check_spends!(node_txn[3], revoked_local_txn[0]);
2795                 check_spends!(node_txn[4], revoked_local_txn[0]);
2796
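                // Each justice tx claims a different output type, again distinguished by witness script length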
2797                 let mut witness_lens = BTreeSet::new();
2798                 witness_lens.insert(node_txn[2].input[0].witness.last().unwrap().len());
2799                 witness_lens.insert(node_txn[3].input[0].witness.last().unwrap().len());
2800                 witness_lens.insert(node_txn[4].input[0].witness.last().unwrap().len());
2801                 assert_eq!(witness_lens.len(), 3);
2802                 assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
2803                 assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC
2804                 assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC
2805         }
2806         get_announce_close_broadcast_events(&nodes, 0, 1);
2807         assert_eq!(nodes[0].node.list_channels().len(), 0);
2808         assert_eq!(nodes[1].node.list_channels().len(), 0);
2809 }
2810
2811 #[test]
2812 fn test_htlc_on_chain_success() {
2813         // Test that in case of a unilateral close onchain, we detect the state of the output and pass
2814         // the preimage backward accordingly. So here we test that ChannelManager is
2815         // broadcasting the right event to other nodes in the payment path.
2816         // We test with two HTLCs simultaneously as that was not handled correctly in the past.
2817         // A --------------------> B ----------------------> C (preimage)
2818         // First, C should claim the HTLC outputs via HTLC-Success when its own latest local
2819         // commitment transaction was broadcast.
2820         // Then, B should learn the preimage from said transactions, attempting to claim backwards
2821         // towards A.
2822         // B should be able to claim via preimage if A then broadcasts its local tx.
2823         // Finally, when A sees B's latest local commitment transaction it should be able to claim
2824         // the HTLC outputs via the preimage it learned (which, once confirmed should generate a
2825         // PaymentSent event).
2826
2827         let chanmon_cfgs = create_chanmon_cfgs(3);
2828         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2829         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2830         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2831
2832         // Create some initial channels
2833         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
2834         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
2835
2836         // Ensure all nodes are at the same height
2837         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
2838         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
2839         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
2840         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
2841
2842         // Rebalance the network a bit by relaying one payment through all the channels...
2843         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
2844         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
2845
2846         let (our_payment_preimage, _payment_hash, _payment_secret) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
2847         let (our_payment_preimage_2, _payment_hash_2, _payment_secret_2) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
2848
2849         // Broadcast legit commitment tx from C on B's chain
2850         // Broadcast HTLC Success transaction by C on received output from C's commitment tx on B's chain
2851         let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
2852         assert_eq!(commitment_tx.len(), 1);
2853         check_spends!(commitment_tx[0], chan_2.3);
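        // C claims both payments off-chain first, generating fulfill updates it won't get to deliver before the channel closes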
2854         nodes[2].node.claim_funds(our_payment_preimage);
2855         nodes[2].node.claim_funds(our_payment_preimage_2);
2856         check_added_monitors!(nodes[2], 2);
2857         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2858         assert!(updates.update_add_htlcs.is_empty());
2859         assert!(updates.update_fail_htlcs.is_empty());
2860         assert!(updates.update_fail_malformed_htlcs.is_empty());
2861         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
2862
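        // Once C's commitment tx confirms, C's ChannelMonitor should claim both HTLC outputs via HTLC-Success txs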
2863         mine_transaction(&nodes[2], &commitment_tx[0]);
2864         check_closed_broadcast!(nodes[2], true);
2865         check_added_monitors!(nodes[2], 1);
2866         let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 3 (commitment tx, 2*htlc-success tx), ChannelMonitor : 2 (2 * HTLC-Success tx)
2867         assert_eq!(node_txn.len(), 5);
2868         assert_eq!(node_txn[0], node_txn[3]);
2869         assert_eq!(node_txn[1], node_txn[4]);
2870         assert_eq!(node_txn[2], commitment_tx[0]);
2871         check_spends!(node_txn[0], commitment_tx[0]);
2872         check_spends!(node_txn[1], commitment_tx[0]);
2873         assert_eq!(node_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2874         assert_eq!(node_txn[1].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2875         assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2876         assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2877         assert_eq!(node_txn[0].lock_time, 0);
2878         assert_eq!(node_txn[1].lock_time, 0);
2879
2880         // Verify that B's ChannelManager is able to extract preimage from HTLC Success tx and pass it backward
2881         let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
2882         connect_block(&nodes[1], &Block { header, txdata: node_txn});
2883         connect_blocks(&nodes[1], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
2884         {
2885                 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
2886                 assert_eq!(added_monitors.len(), 1);
2887                 assert_eq!(added_monitors[0].0.txid, chan_2.3.txid());
2888                 added_monitors.clear();
2889         }
2890         let forwarded_events = nodes[1].node.get_and_clear_pending_events();
2891         assert_eq!(forwarded_events.len(), 2);
2892         if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[0] {
2893         } else { panic!(); }
2894         if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[1] {
2895         } else { panic!(); }
2896         let events = nodes[1].node.get_and_clear_pending_msg_events();
2897         {
2898                 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
2899                 assert_eq!(added_monitors.len(), 2);
2900                 assert_eq!(added_monitors[0].0.txid, chan_1.3.txid());
2901                 assert_eq!(added_monitors[1].0.txid, chan_1.3.txid());
2902                 added_monitors.clear();
2903         }
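        // B should broadcast a channel update, send an error to close the channel, and forward the fulfill towards A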
2904         assert_eq!(events.len(), 3);
2905         match events[0] {
2906                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
2907                 _ => panic!("Unexpected event"),
2908         }
2909         match events[1] {
2910                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id: _ } => {},
2911                 _ => panic!("Unexpected event"),
2912         }
2913
2914         match events[2] {
2915                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
2916                         assert!(update_add_htlcs.is_empty());
2917                         assert!(update_fail_htlcs.is_empty());
2918                         assert_eq!(update_fulfill_htlcs.len(), 1);
2919                         assert!(update_fail_malformed_htlcs.is_empty());
2920                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
2921                 },
2922                 _ => panic!("Unexpected event"),
2923         };
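        // Checks a node's fallback broadcast: its own commitment tx spending the funding output, plus two HTLC claims spending the counterparty's confirmed commitment tx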
2924         macro_rules! check_tx_local_broadcast {
2925                 ($node: expr, $htlc_offered: expr, $commitment_tx: expr, $chan_tx: expr) => { {
2926                         let mut node_txn = $node.tx_broadcaster.txn_broadcasted.lock().unwrap();
2927                         assert_eq!(node_txn.len(), 3);
2928                         // Node[1]: ChannelManager: 3 (commitment tx, 2*HTLC-Timeout tx), ChannelMonitor: 2 (timeout tx)
2929                         // Node[0]: ChannelManager: 3 (commitment tx, 2*HTLC-Timeout tx), ChannelMonitor: 2 (HTLC-timeout tx)
2930                         check_spends!(node_txn[1], $commitment_tx);
2931                         check_spends!(node_txn[2], $commitment_tx);
2932                         assert_ne!(node_txn[1].lock_time, 0);
2933                         assert_ne!(node_txn[2].lock_time, 0);
2934                         if $htlc_offered {
2935                                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2936                                 assert_eq!(node_txn[2].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2937                                 assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2938                                 assert!(node_txn[2].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2939                         } else {
2940                                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2941                                 assert_eq!(node_txn[2].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2942                                 assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
2943                                 assert!(node_txn[2].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
2944                         }
2945                         check_spends!(node_txn[0], $chan_tx);
2946                         assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), 71);
2947                         node_txn.clear();
2948                 } }
2949         }
2950         // nodes[1] now broadcasts its own local state as a fallback, suggesting an alternate
2951         // commitment transaction with corresponding HTLC-Timeout transactions, as well as a
2952         // timeout-claim of the output that nodes[2] just claimed via success.
2953         check_tx_local_broadcast!(nodes[1], false, commitment_tx[0], chan_2.3);
2954
2955         // Broadcast legit commitment tx from A on B's chain
2956         // Broadcast preimage tx by B on the offered output from A's commitment tx on A's chain
2957         let node_a_commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
2958         check_spends!(node_a_commitment_tx[0], chan_1.3);
2959         mine_transaction(&nodes[1], &node_a_commitment_tx[0]);
2960         check_closed_broadcast!(nodes[1], true);
2961         check_added_monitors!(nodes[1], 1);
2962         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
2963         assert_eq!(node_txn.len(), 6); // ChannelManager : 3 (commitment tx + HTLC-Success * 2), ChannelMonitor : 3 (HTLC-Success, 2* RBF bumps of above HTLC txn)
2964         let commitment_spend =
2965                 if node_txn[0].input[0].previous_output.txid == node_a_commitment_tx[0].txid() {
2966                         check_spends!(node_txn[1], commitment_tx[0]);
2967                         check_spends!(node_txn[2], commitment_tx[0]);
2968                         assert_ne!(node_txn[1].input[0].previous_output.vout, node_txn[2].input[0].previous_output.vout);
2969                         &node_txn[0]
2970                 } else {
2971                         check_spends!(node_txn[0], commitment_tx[0]);
2972                         check_spends!(node_txn[1], commitment_tx[0]);
2973                         assert_ne!(node_txn[0].input[0].previous_output.vout, node_txn[1].input[0].previous_output.vout);
2974                         &node_txn[2]
2975                 };
2976
2977         check_spends!(commitment_spend, node_a_commitment_tx[0]);
2978         assert_eq!(commitment_spend.input.len(), 2);
2979         assert_eq!(commitment_spend.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2980         assert_eq!(commitment_spend.input[1].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2981         assert_eq!(commitment_spend.lock_time, 0);
2982         assert!(commitment_spend.output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
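        // B also queued its own commitment tx (spending the funding output) and HTLC-Success claims on it as a fallback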
2983         check_spends!(node_txn[3], chan_1.3);
2984         assert_eq!(node_txn[3].input[0].witness.clone().last().unwrap().len(), 71);
2985         check_spends!(node_txn[4], node_txn[3]);
2986         check_spends!(node_txn[5], node_txn[3]);
2987         // We don't bother to check that B can claim the HTLC output on its commitment tx here as
2988         // we already checked the same situation with A.
2989
2990         // Verify that A's ChannelManager is able to extract preimage from preimage tx and generate PaymentSent
2991         let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
2992         connect_block(&nodes[0], &Block { header, txdata: vec![node_a_commitment_tx[0].clone(), commitment_spend.clone()] });
2993         connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32 - 1); // Confirm blocks until the HTLC expires
2994         check_closed_broadcast!(nodes[0], true);
2995         check_added_monitors!(nodes[0], 1);
2996         let events = nodes[0].node.get_and_clear_pending_events();
2997         assert_eq!(events.len(), 2);
2998         let mut first_claimed = false;
2999         for event in events {
3000                 match event {
3001                         Event::PaymentSent { payment_preimage } => {
3002                                 if payment_preimage == our_payment_preimage {
3003                                         assert!(!first_claimed);
3004                                         first_claimed = true;
3005                                 } else {
3006                                         assert_eq!(payment_preimage, our_payment_preimage_2);
3007                                 }
3008                         },
3009                         _ => panic!("Unexpected event"),
3010                 }
3011         }
3012         check_tx_local_broadcast!(nodes[0], true, node_a_commitment_tx[0], chan_1.3);
3013 }
3014
3015 fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
3016         // Test that in case of a unilateral close onchain, we detect the state of the output and
3017         // time out the HTLC backward accordingly. So here we test that ChannelManager is
3018         // broadcasting the right event to other nodes in the payment path.
3019         // A ------------------> B ----------------------> C (timeout)
3020         //    B's commitment tx                 C's commitment tx
3021         //            \                                  \
3022         //         B's HTLC timeout tx               B's timeout tx
3023
3024         let chanmon_cfgs = create_chanmon_cfgs(3);
3025         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3026         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3027         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3028         *nodes[0].connect_style.borrow_mut() = connect_style;
3029         *nodes[1].connect_style.borrow_mut() = connect_style;
3030         *nodes[2].connect_style.borrow_mut() = connect_style;
3031
3032         // Create some initial channels
3033         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
3034         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
3035
3036         // Rebalance the network a bit by relaying one payment through all the channels...
3037         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
3038         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
3039
3040         let (_payment_preimage, payment_hash, _payment_secret) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
3041
3042         // Broadcast legit commitment tx from C on B's chain
3043         let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
3044         check_spends!(commitment_tx[0], chan_2.3);
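        // C fails the HTLC back off-chain; the resulting update_fail is generated but never delivered before the close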
3045         nodes[2].node.fail_htlc_backwards(&payment_hash);
3046         check_added_monitors!(nodes[2], 0);
3047         expect_pending_htlcs_forwardable!(nodes[2]);
3048         check_added_monitors!(nodes[2], 1);
3049
3050         let events = nodes[2].node.get_and_clear_pending_msg_events();
3051         assert_eq!(events.len(), 1);
3052         match events[0] {
3053                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
3054                         assert!(update_add_htlcs.is_empty());
3055                         assert!(!update_fail_htlcs.is_empty());
3056                         assert!(update_fulfill_htlcs.is_empty());
3057                         assert!(update_fail_malformed_htlcs.is_empty());
3058                         assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
3059                 },
3060                 _ => panic!("Unexpected event"),
3061         };
3062         mine_transaction(&nodes[2], &commitment_tx[0]);
3063         check_closed_broadcast!(nodes[2], true);
3064         check_added_monitors!(nodes[2], 1);
3065         let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx)
3066         assert_eq!(node_txn.len(), 1);
3067         check_spends!(node_txn[0], chan_2.3);
3068         assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), 71);
3069
3070         // Broadcast timeout transaction by B on received output from C's commitment tx on B's chain
3071         // Verify that B's ChannelManager is able to detect that the HTLC has timed out via its own tx and react backward accordingly
3072         connect_blocks(&nodes[1], 200 - nodes[2].best_block_info().1);
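        // Advance B well past the HTLC's expiry so its ChannelMonitor will broadcast a timeout claim once C's commitment confirms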
3073         mine_transaction(&nodes[1], &commitment_tx[0]);
3074         let timeout_tx;
3075         {
3076                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
3077                 assert_eq!(node_txn.len(), 5); // ChannelManager : 2 (commitment tx, HTLC-Timeout tx), ChannelMonitor : 2 (local commitment tx + HTLC-timeout), 1 timeout tx
3078                 assert_eq!(node_txn[0], node_txn[3]);
3079                 assert_eq!(node_txn[1], node_txn[4]);
3080
3081                 check_spends!(node_txn[2], commitment_tx[0]);
3082                 assert_eq!(node_txn[2].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
3083
3084                 check_spends!(node_txn[0], chan_2.3);
3085                 check_spends!(node_txn[1], node_txn[0]);
3086                 assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), 71);
3087                 assert_eq!(node_txn[1].clone().input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
3088
3089                 timeout_tx = node_txn[2].clone();
3090                 node_txn.clear();
3091         }
3092
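        // Confirm B's HTLC-timeout claim; once it has ANTI_REORG_DELAY confirmations B fails the HTLC back to A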
3093         mine_transaction(&nodes[1], &timeout_tx);
3094         check_added_monitors!(nodes[1], 1);
3095         check_closed_broadcast!(nodes[1], true);
3096         {
3097                 // B will rebroadcast a fee-bumped timeout transaction here.
3098                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
3099                 assert_eq!(node_txn.len(), 1);
3100                 check_spends!(node_txn[0], commitment_tx[0]);
3101         }
3102
3103         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3104         {
3105                 // B may rebroadcast its own holder commitment transaction here, as a safeguard against
3106                 // some incredibly unlikely partial-eclipse-attack scenarios. That said, because the
3107                 // original commitment_tx[0] (also spending chan_2.3) has reached ANTI_REORG_DELAY B really
3108                 // shouldn't broadcast anything here, and in some connect style scenarios we do not.
3109                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
3110                 if node_txn.len() == 1 {
3111                         check_spends!(node_txn[0], chan_2.3);
3112                 } else {
3113                         assert_eq!(node_txn.len(), 0);
3114                 }
3115         }
3116
3117         expect_pending_htlcs_forwardable!(nodes[1]);
3118         check_added_monitors!(nodes[1], 1);
3119         let events = nodes[1].node.get_and_clear_pending_msg_events();
3120         assert_eq!(events.len(), 1);
3121         match events[0] {
3122                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
3123                         assert!(update_add_htlcs.is_empty());
3124                         assert!(!update_fail_htlcs.is_empty());
3125                         assert!(update_fulfill_htlcs.is_empty());
3126                         assert!(update_fail_malformed_htlcs.is_empty());
3127                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3128                 },
3129                 _ => panic!("Unexpected event"),
3130         };
3131
3132         // Broadcast legit commitment tx from B on A's chain
3133         let commitment_tx = get_local_commitment_txn!(nodes[1], chan_1.2);
3134         check_spends!(commitment_tx[0], chan_1.3);
3135
3136         mine_transaction(&nodes[0], &commitment_tx[0]);
3137         connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32 - 1); // Confirm blocks until the HTLC expires
3138
3139         check_closed_broadcast!(nodes[0], true);
3140         check_added_monitors!(nodes[0], 1);
3141         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 commitment tx, ChannelMonitor : 1 timeout tx
3142         assert_eq!(node_txn.len(), 2);
3143         check_spends!(node_txn[0], chan_1.3);
3144         assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), 71);
3145         check_spends!(node_txn[1], commitment_tx[0]);
3146         assert_eq!(node_txn[1].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
3147 }
3148
3149 #[test]
3150 fn test_htlc_on_chain_timeout() {
3151         do_test_htlc_on_chain_timeout(ConnectStyle::BestBlockFirstSkippingBlocks);
3152         do_test_htlc_on_chain_timeout(ConnectStyle::TransactionsFirstSkippingBlocks);
3153         do_test_htlc_on_chain_timeout(ConnectStyle::FullBlockViaListen);
3154 }
3155
3156 #[test]
3157 fn test_simple_commitment_revoked_fail_backward() {
3158         // Test that in case of a revoked commitment tx, we detect the resolution of output by justice tx
3159         // and fail backward accordingly.
3160
3161         let chanmon_cfgs = create_chanmon_cfgs(3);
3162         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3163         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3164         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3165
3166         // Create some initial channels
3167         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
3168         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
3169
3170         let (payment_preimage, _payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
3171         // Get the will-be-revoked local txn from nodes[2]
3172         let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
3173         // Revoke the old state
3174         claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
3175
3176         let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
3177
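        // Confirm the revoked commitment on B's chain; after ANTI_REORG_DELAY, B should fail the in-flight HTLC back to A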
3178         mine_transaction(&nodes[1], &revoked_local_txn[0]);
3179         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3180         check_added_monitors!(nodes[1], 1);
3181         check_closed_broadcast!(nodes[1], true);
3182
3183         expect_pending_htlcs_forwardable!(nodes[1]);
3184         check_added_monitors!(nodes[1], 1);
3185         let events = nodes[1].node.get_and_clear_pending_msg_events();
3186         assert_eq!(events.len(), 1);
3187         match events[0] {
3188                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
3189                         assert!(update_add_htlcs.is_empty());
3190                         assert_eq!(update_fail_htlcs.len(), 1);
3191                         assert!(update_fulfill_htlcs.is_empty());
3192                         assert!(update_fail_malformed_htlcs.is_empty());
3193                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3194
3195                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
3196                         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
3197                         expect_payment_failure_chan_update!(nodes[0], chan_2.0.contents.short_channel_id, true);
3198                         expect_payment_failed!(nodes[0], payment_hash, false);
3199                 },
3200                 _ => panic!("Unexpected event"),
3201         }
3202 }
3203
3204 fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use_dust: bool, no_to_remote: bool) {
3205         // Test that if our counterparty broadcasts a revoked commitment transaction we fail all
3206         // pending HTLCs on that channel backwards even if the HTLCs aren't present in our latest
3207         // commitment transaction anymore.
3208         // To do this, we have the peer which will broadcast a revoked commitment transaction send
3209         // a number of update_fail/commitment_signed updates without ever sending the RAA in
3210         // response to our commitment_signed. This is somewhat misbehavior-y, though not
3211         // technically disallowed and we should probably handle it reasonably.
3212         // Note that this is pretty exhaustive as an outbound HTLC which we haven't yet
3213         // failed/fulfilled backwards must be in at least one of the latest two remote commitment
3214         // transactions:
3215         // * Once we move it out of our holding cell/add it, we will immediately include it in a
3216         //   commitment_signed (implying it will be in the latest remote commitment transaction).
3217         // * Once they remove it, we will send a (the first) commitment_signed without the HTLC,
3218         //   and once they revoke the previous commitment transaction (allowing us to send a new
3219         //   commitment_signed) we will be free to fail/fulfill the HTLC backwards.
3220         let chanmon_cfgs = create_chanmon_cfgs(3);
3221         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3222         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3223         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3224
3225         // Create some initial channels
3226         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
3227         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
3228
3229         let (payment_preimage, _payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], if no_to_remote { 10_000 } else { 3_000_000 });
3230         // Get the will-be-revoked local txn from nodes[2]
3231         let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
3232         assert_eq!(revoked_local_txn[0].output.len(), if no_to_remote { 1 } else { 2 });
3233         // Revoke the old state
3234         claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
3235
3236         let value = if use_dust {
3237                 // The dust limit applied to HTLC outputs considers the fee of the HTLC transaction as
3238                 // well, so HTLCs at exactly the dust limit will not be included in commitment txn.
3239                 nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().holder_dust_limit_satoshis * 1000
3240         } else { 3000000 };
3241
3242         let (_, first_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3243         let (_, second_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3244         let (_, third_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3245
3246         assert!(nodes[2].node.fail_htlc_backwards(&first_payment_hash));
3247         expect_pending_htlcs_forwardable!(nodes[2]);
3248         check_added_monitors!(nodes[2], 1);
3249         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3250         assert!(updates.update_add_htlcs.is_empty());
3251         assert!(updates.update_fulfill_htlcs.is_empty());
3252         assert!(updates.update_fail_malformed_htlcs.is_empty());
3253         assert_eq!(updates.update_fail_htlcs.len(), 1);
3254         assert!(updates.update_fee.is_none());
3255         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3256         let bs_raa = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
3257         // Drop the last RAA from 3 -> 2
3258
3259         assert!(nodes[2].node.fail_htlc_backwards(&second_payment_hash));
3260         expect_pending_htlcs_forwardable!(nodes[2]);
3261         check_added_monitors!(nodes[2], 1);
3262         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3263         assert!(updates.update_add_htlcs.is_empty());
3264         assert!(updates.update_fulfill_htlcs.is_empty());
3265         assert!(updates.update_fail_malformed_htlcs.is_empty());
3266         assert_eq!(updates.update_fail_htlcs.len(), 1);
3267         assert!(updates.update_fee.is_none());
3268         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3269         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
3270         check_added_monitors!(nodes[1], 1);
3271         // Note that nodes[1] is in AwaitingRAA, so won't send a CS
3272         let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
3273         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
3274         check_added_monitors!(nodes[2], 1);
3275
3276         assert!(nodes[2].node.fail_htlc_backwards(&third_payment_hash));
3277         expect_pending_htlcs_forwardable!(nodes[2]);
3278         check_added_monitors!(nodes[2], 1);
3279         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3280         assert!(updates.update_add_htlcs.is_empty());
3281         assert!(updates.update_fulfill_htlcs.is_empty());
3282         assert!(updates.update_fail_malformed_htlcs.is_empty());
3283         assert_eq!(updates.update_fail_htlcs.len(), 1);
3284         assert!(updates.update_fee.is_none());
3285         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3286         // At this point first_payment_hash has dropped out of the latest two commitment
3287         // transactions that nodes[1] is tracking...
3288         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
3289         check_added_monitors!(nodes[1], 1);
3290         // Note that nodes[1] is (still) in AwaitingRAA, so won't send a CS
3291         let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
3292         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
3293         check_added_monitors!(nodes[2], 1);
3294
3295         // Add a fourth HTLC, this one will get sequestered away in nodes[1]'s holding cell waiting
3296         // on nodes[2]'s RAA.
3297         let (_, fourth_payment_hash, fourth_payment_secret) = get_payment_preimage_hash!(nodes[2]);
3298         let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
3299         let logger = test_utils::TestLogger::new();
3300         let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
3301         nodes[1].node.send_payment(&route, fourth_payment_hash, &Some(fourth_payment_secret)).unwrap();
3302         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3303         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
3304         check_added_monitors!(nodes[1], 0);
3305
3306         if deliver_bs_raa {
3307                 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_raa);
3308                 // One monitor for the new revocation preimage, no second one as we won't generate a new
3309                 // commitment transaction for nodes[0] until process_pending_htlc_forwards().
3310                 check_added_monitors!(nodes[1], 1);
3311                 let events = nodes[1].node.get_and_clear_pending_events();
3312                 assert_eq!(events.len(), 1);
3313                 match events[0] {
3314                         Event::PendingHTLCsForwardable { .. } => { },
3315                         _ => panic!("Unexpected event"),
3316                 };
3317                 // Deliberately don't process the pending fail-back so they all fail back at once after
3318                 // block connection just like the !deliver_bs_raa case
3319         }
3320
3321         let mut failed_htlcs = HashSet::new();
3322         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
3323
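        // Confirm the revoked commitment; B must fail back all three HTLCs it was relaying, even those no longer in its latest commitment transactions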
3324         mine_transaction(&nodes[1], &revoked_local_txn[0]);
3325         check_added_monitors!(nodes[1], 1);
3326         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3327
3328         let events = nodes[1].node.get_and_clear_pending_events();
3329         assert_eq!(events.len(), if deliver_bs_raa { 1 } else { 2 });
3330         match events[0] {
3331                 Event::PaymentFailed { ref payment_hash, .. } => {
3332                         assert_eq!(*payment_hash, fourth_payment_hash);
3333                 },
3334                 _ => panic!("Unexpected event"),
3335         }
3336         if !deliver_bs_raa {
3337                 match events[1] {
3338                         Event::PendingHTLCsForwardable { .. } => { },
3339                         _ => panic!("Unexpected event"),
3340                 };
3341         }
3342         nodes[1].node.process_pending_htlc_forwards();
3343         check_added_monitors!(nodes[1], 1);
3344
3345         let events = nodes[1].node.get_and_clear_pending_msg_events();
3346         assert_eq!(events.len(), if deliver_bs_raa { 4 } else { 3 });
3347         match events[if deliver_bs_raa { 1 } else { 0 }] {
3348                 MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {},
3349                 _ => panic!("Unexpected event"),
3350         }
3351         match events[if deliver_bs_raa { 2 } else { 1 }] {
3352                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { channel_id, ref data } }, node_id: _ } => {
3353                         assert_eq!(channel_id, chan_2.2);
3354                         assert_eq!(data.as_str(), "Commitment or closing transaction was confirmed on chain.");
3355                 },
3356                 _ => panic!("Unexpected event"),
3357         }
3358         if deliver_bs_raa {
3359                 match events[0] {
3360                         MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
3361                                 assert_eq!(nodes[2].node.get_our_node_id(), *node_id);
3362                                 assert_eq!(update_add_htlcs.len(), 1);
3363                                 assert!(update_fulfill_htlcs.is_empty());
3364                                 assert!(update_fail_htlcs.is_empty());
3365                                 assert!(update_fail_malformed_htlcs.is_empty());
3366                         },
3367                         _ => panic!("Unexpected event"),
3368                 }
3369         }
3370         match events[if deliver_bs_raa { 3 } else { 2 }] {
3371                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
3372                         assert!(update_add_htlcs.is_empty());
3373                         assert_eq!(update_fail_htlcs.len(), 3);
3374                         assert!(update_fulfill_htlcs.is_empty());
3375                         assert!(update_fail_malformed_htlcs.is_empty());
3376                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3377
3378                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
3379                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[1]);
3380                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[2]);
3381
3382                         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
3383
3384                         let events = nodes[0].node.get_and_clear_pending_msg_events();
3385                         // If we delivered B's RAA we got an unknown preimage error, not something
3386                         // that we should update our routing table for.
3387                         assert_eq!(events.len(), if deliver_bs_raa { 2 } else { 3 });
3388                         for event in events {
3389                                 match event {
3390                                         MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
3391                                         _ => panic!("Unexpected event"),
3392                                 }
3393                         }
3394                         let events = nodes[0].node.get_and_clear_pending_events();
3395                         assert_eq!(events.len(), 3);
3396                         match events[0] {
3397                                 Event::PaymentFailed { ref payment_hash, .. } => {
3398                                         assert!(failed_htlcs.insert(payment_hash.0));
3399                                 },
3400                                 _ => panic!("Unexpected event"),
3401                         }
3402                         match events[1] {
3403                                 Event::PaymentFailed { ref payment_hash, .. } => {
3404                                         assert!(failed_htlcs.insert(payment_hash.0));
3405                                 },
3406                                 _ => panic!("Unexpected event"),
3407                         }
3408                         match events[2] {
3409                                 Event::PaymentFailed { ref payment_hash, .. } => {
3410                                         assert!(failed_htlcs.insert(payment_hash.0));
3411                                 },
3412                                 _ => panic!("Unexpected event"),
3413                         }
3414                 },
3415                 _ => panic!("Unexpected event"),
3416         }
3417
3418         assert!(failed_htlcs.contains(&first_payment_hash.0));
3419         assert!(failed_htlcs.contains(&second_payment_hash.0));
3420         assert!(failed_htlcs.contains(&third_payment_hash.0));
3421 }
3422
3423 #[test]
3424 fn test_commitment_revoked_fail_backward_exhaustive_a() {
3425         do_test_commitment_revoked_fail_backward_exhaustive(false, true, false);
3426         do_test_commitment_revoked_fail_backward_exhaustive(true, true, false);
3427         do_test_commitment_revoked_fail_backward_exhaustive(false, false, false);
3428         do_test_commitment_revoked_fail_backward_exhaustive(true, false, false);
3429 }
3430
3431 #[test]
3432 fn test_commitment_revoked_fail_backward_exhaustive_b() {
3433         do_test_commitment_revoked_fail_backward_exhaustive(false, true, true);
3434         do_test_commitment_revoked_fail_backward_exhaustive(true, true, true);
3435         do_test_commitment_revoked_fail_backward_exhaustive(false, false, true);
3436         do_test_commitment_revoked_fail_backward_exhaustive(true, false, true);
3437 }
3438
3439 #[test]
3440 fn fail_backward_pending_htlc_upon_channel_failure() {
3441         let chanmon_cfgs = create_chanmon_cfgs(2);
3442         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3443         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3444         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3445         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000, InitFeatures::known(), InitFeatures::known());
3446         let logger = test_utils::TestLogger::new();
3447
3448         // Alice -> Bob: Route a payment but without Bob sending revoke_and_ack.
3449         {
3450                 let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[1]);
3451                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
3452                 let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 50_000, TEST_FINAL_CLTV, &logger).unwrap();
3453                 nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
3454                 check_added_monitors!(nodes[0], 1);
3455
3456                 let payment_event = {
3457                         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3458                         assert_eq!(events.len(), 1);
3459                         SendEvent::from_event(events.remove(0))
3460                 };
3461                 assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
3462                 assert_eq!(payment_event.msgs.len(), 1);
3463         }
3464
3465         // Alice -> Bob: Route another payment but now Alice waits for Bob's earlier revoke_and_ack.
3466         let (_, failed_payment_hash, failed_payment_secret) = get_payment_preimage_hash!(nodes[1]);
3467         {
3468                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
3469                 let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 50_000, TEST_FINAL_CLTV, &logger).unwrap();
3470                 nodes[0].node.send_payment(&route, failed_payment_hash, &Some(failed_payment_secret)).unwrap();
3471                 check_added_monitors!(nodes[0], 0);
3472
3473                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
3474         }
3475
3476         // Alice <- Bob: Send a malformed update_add_htlc so Alice fails the channel.
3477         {
3478                 let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[0]);
3479
3480                 let secp_ctx = Secp256k1::new();
3481                 let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
3482                 let current_height = nodes[1].node.best_block.read().unwrap().height() + 1;
3483                 let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
3484                 let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 50_000, TEST_FINAL_CLTV, &logger).unwrap();
3485                 let (onion_payloads, _amount_msat, cltv_expiry) = onion_utils::build_onion_payloads(&route.paths[0], 50_000, &Some(payment_secret), current_height, &None).unwrap();
3486                 let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
3487                 let onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash);
3488
3489                 // Send a 0-msat update_add_htlc to fail the channel.
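                     // Per BOLT 2, an update_add_htlc must have amount_msat greater than 0; a node receiving a
                     // 0-msat (or below-minimum) HTLC is expected to fail the channel, which is the behavior
                     // exercised here.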
3490                 let update_add_htlc = msgs::UpdateAddHTLC {
3491                         channel_id: chan.2,
3492                         htlc_id: 0,
3493                         amount_msat: 0,
3494                         payment_hash,
3495                         cltv_expiry,
3496                         onion_routing_packet,
3497                 };
3498                 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_htlc);
3499         }
3500
3501         // Check that Alice fails backward the pending HTLC from the second payment.
3502         expect_payment_failed!(nodes[0], failed_payment_hash, true);
3503         check_closed_broadcast!(nodes[0], true);
3504         check_added_monitors!(nodes[0], 1);
3505 }
3506
3507 #[test]
3508 fn test_htlc_ignore_latest_remote_commitment() {
3509         // Test that HTLC transactions spending the latest remote commitment transaction are simply
3510         // ignored if we cannot claim them. This originally tickled an invalid unwrap().
3511         let chanmon_cfgs = create_chanmon_cfgs(2);
3512         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3513         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3514         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3515         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
3516
3517         route_payment(&nodes[0], &[&nodes[1]], 10000000);
3518         nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
3519         connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
3520         check_closed_broadcast!(nodes[0], true);
3521         check_added_monitors!(nodes[0], 1);
3522
3523         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
3524         assert_eq!(node_txn.len(), 3);
3525         assert_eq!(node_txn[0], node_txn[1]);
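             // node_txn is expected to contain the holder commitment transaction twice, plus nodes[0]'s
             // HTLC-Timeout transaction spending it; nodes[1] must simply ignore that HTLC spend below since
             // it cannot claim it.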
3526
3527         let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
3528         connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[1].clone()]});
3529         check_closed_broadcast!(nodes[1], true);
3530         check_added_monitors!(nodes[1], 1);
3531
3532         // Duplicate the connect_block call since this may happen due to other listeners
3533         // registering new transactions
3534         header.prev_blockhash = header.block_hash();
3535         connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[2].clone()]});
3536 }
3537
3538 #[test]
3539 fn test_force_close_fail_back() {
3540         // Check which HTLCs are failed-backwards on channel force-closure
3541         let chanmon_cfgs = create_chanmon_cfgs(3);
3542         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3543         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3544         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3545         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
3546         create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
3547         let logger = test_utils::TestLogger::new();
3548
3549         let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]);
3550
3551         let mut payment_event = {
3552                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
3553                 let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, 42, &logger).unwrap();
3554                 nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
3555                 check_added_monitors!(nodes[0], 1);
3556
3557                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3558                 assert_eq!(events.len(), 1);
3559                 SendEvent::from_event(events.remove(0))
3560         };
3561
3562         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
3563         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
3564
3565         expect_pending_htlcs_forwardable!(nodes[1]);
3566
3567         let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
3568         assert_eq!(events_2.len(), 1);
3569         payment_event = SendEvent::from_event(events_2.remove(0));
3570         assert_eq!(payment_event.msgs.len(), 1);
3571
3572         check_added_monitors!(nodes[1], 1);
3573         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
3574         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg);
3575         check_added_monitors!(nodes[2], 1);
3576         let (_, _) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3577
3578         // nodes[2] now has the latest commitment transaction, but hasn't revoked its previous
3579         // state or updated nodes[1]'s state. Now force-close and broadcast that commitment/HTLC
3580         // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
3581
3582         nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id).unwrap();
3583         check_closed_broadcast!(nodes[2], true);
3584         check_added_monitors!(nodes[2], 1);
3585         let tx = {
3586                 let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
3587                 // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
3588                 // have a use for it unless nodes[2] learns the preimage somehow; otherwise the funds will
3589                 // simply go back to nodes[1] upon timeout.
3590                 assert_eq!(node_txn.len(), 1);
3591                 node_txn.remove(0)
3592         };
3593
3594         mine_transaction(&nodes[1], &tx);
3595
3596         // Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
3597         check_closed_broadcast!(nodes[1], true);
3598         check_added_monitors!(nodes[1], 1);
3599
3600         // Now check that, if we add the preimage to the ChannelMonitor, it broadcasts our HTLC-Success.
3601         {
3602                 let mut monitors = nodes[2].chain_monitor.chain_monitor.monitors.read().unwrap();
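                     // The channel_id is the funding txid with the funding output index XORed into its last
                     // two bytes; since the funding output index here is 0, the channel_id bytes can be reused
                     // directly as the funding txid.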
3603                 monitors.get(&OutPoint{ txid: Txid::from_slice(&payment_event.commitment_msg.channel_id[..]).unwrap(), index: 0 }).unwrap()
3604                         .provide_payment_preimage(&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &node_cfgs[2].fee_estimator, &&logger);
3605         }
3606         mine_transaction(&nodes[2], &tx);
3607         let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
3608         assert_eq!(node_txn.len(), 1);
3609         assert_eq!(node_txn[0].input.len(), 1);
3610         assert_eq!(node_txn[0].input[0].previous_output.txid, tx.txid());
3611         assert_eq!(node_txn[0].lock_time, 0); // Must be an HTLC-Success
3612         assert_eq!(node_txn[0].input[0].witness.len(), 5); // Must be an HTLC-Success
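             // (Per BOLT 3, HTLC-Timeout transactions set nLockTime to the HTLC's cltv_expiry while
             // HTLC-Success transactions use 0, and the HTLC-Success witness is
             // `0 <remote_sig> <local_sig> <payment_preimage> <witness_script>`, i.e. five elements.)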
3613
3614         check_spends!(node_txn[0], tx);
3615 }
3616
3617 #[test]
3618 fn test_dup_events_on_peer_disconnect() {
3619         // Test that if we receive a duplicative update_fulfill_htlc message after a reconnect we do
3620         // not generate a corresponding duplicative PaymentSent event. Previously this was not the case,
3621         // as we generated the event immediately upon receipt of the payment preimage in the
3622         // update_fulfill_htlc message.
3623
3624         let chanmon_cfgs = create_chanmon_cfgs(2);
3625         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3626         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3627         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3628         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
3629
3630         let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 1000000).0;
3631
3632         assert!(nodes[1].node.claim_funds(payment_preimage));
3633         check_added_monitors!(nodes[1], 1);
3634         let claim_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
3635         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]);
3636         expect_payment_sent!(nodes[0], payment_preimage);
3637
3638         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
3639         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
3640
3641         reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
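             // reconnect_nodes re-delivers nodes[1]'s update_fulfill_htlc during the reestablish; the
             // duplicate delivery must not produce a second PaymentSent event.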
3642         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
3643 }
3644
3645 #[test]
3646 fn test_simple_peer_disconnect() {
3647         // Test that we can reconnect when there are no lost messages
3648         let chanmon_cfgs = create_chanmon_cfgs(3);
3649         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3650         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3651         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3652         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
3653         create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
3654
3655         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
3656         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
3657         reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
3658
3659         let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
3660         let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
3661         fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_2);
3662         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_1);
3663
3664         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
3665         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
3666         reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
3667
3668         let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
3669         let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
3670         let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
3671         let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
3672
3673         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
3674         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
3675
3676         claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_preimage_3);
3677         fail_payment_along_route(&nodes[0], &[&nodes[1], &nodes[2]], true, payment_hash_5);
3678
3679         reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (1, 0), (1, 0), (false, false));
3680         {
3681                 let events = nodes[0].node.get_and_clear_pending_events();
3682                 assert_eq!(events.len(), 2);
3683                 match events[0] {
3684                         Event::PaymentSent { payment_preimage } => {
3685                                 assert_eq!(payment_preimage, payment_preimage_3);
3686                         },
3687                         _ => panic!("Unexpected event"),
3688                 }
3689                 match events[1] {
3690                         Event::PaymentFailed { payment_hash, rejected_by_dest, .. } => {
3691                                 assert_eq!(payment_hash, payment_hash_5);
3692                                 assert!(rejected_by_dest);
3693                         },
3694                         _ => panic!("Unexpected event"),
3695                 }
3696         }
3697
3698         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4);
3699         fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_6);
3700 }
3701
3702 fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken_lnd: bool) {
3703         // Test that we can reconnect when in-flight HTLC updates get dropped
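             // `messages_delivered` selects how far the initial HTLC add got before the disconnect: 0 means
             // even nodes[0]'s funding_locked was dropped, 1 means the channel is set up but no HTLC messages
             // were delivered, 2 delivers the update_add_htlc, 3 adds nodes[0]'s commitment_signed, 4 nodes[1]'s
             // revoke_and_ack, 5 nodes[1]'s commitment_signed, and 6 the final revoke_and_ack. The same counter
             // is reused below to gate how much of nodes[1]'s claim (update_fulfill) leg is delivered.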
3704         let chanmon_cfgs = create_chanmon_cfgs(2);
3705         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3706         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3707         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3708
3709         let mut as_funding_locked = None;
3710         if messages_delivered == 0 {
3711                 let (funding_locked, _, _) = create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001, InitFeatures::known(), InitFeatures::known());
3712                 as_funding_locked = Some(funding_locked);
3713                 // nodes[1] doesn't receive the funding_locked message (it'll be re-sent on reconnect)
3714                 // Note that we store it so that if we're running with `simulate_broken_lnd` we can deliver
3715                 // it before the channel_reestablish message.
3716         } else {
3717                 create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
3718         }
3719
3720         let (payment_preimage_1, payment_hash_1, payment_secret_1) = get_payment_preimage_hash!(nodes[1]);
3721
3722         let logger = test_utils::TestLogger::new();
3723         let payment_event = {
3724                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
3725                 let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(),
3726                         &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), Some(&nodes[0].node.list_usable_channels().iter().collect::<Vec<_>>()),
3727                         &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
3728                 nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)).unwrap();
3729                 check_added_monitors!(nodes[0], 1);
3730
3731                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3732                 assert_eq!(events.len(), 1);
3733                 SendEvent::from_event(events.remove(0))
3734         };
3735         assert_eq!(nodes[1].node.get_our_node_id(), payment_event.node_id);
3736
3737         if messages_delivered < 2 {
3738                 // Drop the payment_event messages, and let them get re-generated in reconnect_nodes!
3739         } else {
3740                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
3741                 if messages_delivered >= 3 {
3742                         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
3743                         check_added_monitors!(nodes[1], 1);
3744                         let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
3745
3746                         if messages_delivered >= 4 {
3747                                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
3748                                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
3749                                 check_added_monitors!(nodes[0], 1);
3750
3751                                 if messages_delivered >= 5 {
3752                                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
3753                                         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
3754                                         // No commitment_signed so get_event_msg's assert(len == 1) passes
3755                                         check_added_monitors!(nodes[0], 1);
3756
3757                                         if messages_delivered >= 6 {
3758                                                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
3759                                                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3760                                                 check_added_monitors!(nodes[1], 1);
3761                                         }
3762                                 }
3763                         }
3764                 }
3765         }
3766
3767         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
3768         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
3769         if messages_delivered < 3 {
3770                 if simulate_broken_lnd {
3771                         // lnd has a long-standing bug where it sends a funding_locked prior to a
3772                         // channel_reestablish if you reconnect before funding_locked has been exchanged.
3773                         //
3774                         // Here we simulate that behavior, delivering a funding_locked immediately on
3775                         // reconnect. Note that we don't bother skipping the now-duplicate funding_locked sent
3776                         // in `reconnect_nodes` but we currently don't fail based on that.
3777                         //
3778                         // See-also <https://github.com/lightningnetwork/lnd/issues/4006>
3779                         nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &as_funding_locked.as_ref().unwrap().0);
3780                 }
3781                 // Even if the funding_locked messages get exchanged, as long as nothing further was
3782                 // received on either side, both sides will need to resend them.
3783                 reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 1), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
3784         } else if messages_delivered == 3 {
3785                 // nodes[0] still wants its RAA + commitment_signed
3786                 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
3787         } else if messages_delivered == 4 {
3788                 // nodes[0] still wants its commitment_signed
3789                 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
3790         } else if messages_delivered == 5 {
3791                 // nodes[1] still wants its final RAA
3792                 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
3793         } else if messages_delivered == 6 {
3794                 // Everything was delivered...
3795                 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
3796         }
3797
3798         let events_1 = nodes[1].node.get_and_clear_pending_events();
3799         assert_eq!(events_1.len(), 1);
3800         match events_1[0] {
3801                 Event::PendingHTLCsForwardable { .. } => { },
3802                 _ => panic!("Unexpected event"),
3803         };
3804
3805         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
3806         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
3807         reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
3808
3809         nodes[1].node.process_pending_htlc_forwards();
3810
3811         let events_2 = nodes[1].node.get_and_clear_pending_events();
3812         assert_eq!(events_2.len(), 1);
3813         match events_2[0] {
3814                 Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
3815                         assert_eq!(payment_hash_1, *payment_hash);
3816                         assert_eq!(amt, 1000000);
3817                         match &purpose {
3818                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
3819                                         assert!(payment_preimage.is_none());
3820                                         assert_eq!(payment_secret_1, *payment_secret);
3821                                 },
3822                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
3823                         }
3824                 },
3825                 _ => panic!("Unexpected event"),
3826         }
3827
3828         nodes[1].node.claim_funds(payment_preimage_1);
3829         check_added_monitors!(nodes[1], 1);
3830
3831         let events_3 = nodes[1].node.get_and_clear_pending_msg_events();
3832         assert_eq!(events_3.len(), 1);
3833         let (update_fulfill_htlc, commitment_signed) = match events_3[0] {
3834                 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
3835                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
3836                         assert!(updates.update_add_htlcs.is_empty());
3837                         assert!(updates.update_fail_htlcs.is_empty());
3838                         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
3839                         assert!(updates.update_fail_malformed_htlcs.is_empty());
3840                         assert!(updates.update_fee.is_none());
3841                         (updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone())
3842                 },
3843                 _ => panic!("Unexpected event"),
3844         };
3845
3846         if messages_delivered >= 1 {
3847                 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlc);
3848
3849                 let events_4 = nodes[0].node.get_and_clear_pending_events();
3850                 assert_eq!(events_4.len(), 1);
3851                 match events_4[0] {
3852                         Event::PaymentSent { ref payment_preimage } => {
3853                                 assert_eq!(payment_preimage_1, *payment_preimage);
3854                         },
3855                         _ => panic!("Unexpected event"),
3856                 }
3857
3858                 if messages_delivered >= 2 {
3859                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
3860                         check_added_monitors!(nodes[0], 1);
3861                         let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
3862
3863                         if messages_delivered >= 3 {
3864                                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
3865                                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3866                                 check_added_monitors!(nodes[1], 1);
3867
3868                                 if messages_delivered >= 4 {
3869                                         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed);
3870                                         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
3871                                         // No commitment_signed so get_event_msg's assert(len == 1) passes
3872                                         check_added_monitors!(nodes[1], 1);
3873
3874                                         if messages_delivered >= 5 {
3875                                                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
3876                                                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
3877                                                 check_added_monitors!(nodes[0], 1);
3878                                         }
3879                                 }
3880                         }
3881                 }
3882         }
3883
3884         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
3885         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
3886         if messages_delivered < 2 {
3887                 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
3888                 if messages_delivered < 1 {
3889                         let events_4 = nodes[0].node.get_and_clear_pending_events();
3890                         assert_eq!(events_4.len(), 1);
3891                         match events_4[0] {
3892                                 Event::PaymentSent { ref payment_preimage } => {
3893                                         assert_eq!(payment_preimage_1, *payment_preimage);
3894                                 },
3895                                 _ => panic!("Unexpected event"),
3896                         }
3897                 } else {
3898                         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
3899                 }
3900         } else if messages_delivered == 2 {
3901                 // nodes[0] still wants its RAA + commitment_signed
3902                 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
3903         } else if messages_delivered == 3 {
3904                 // nodes[0] still wants its commitment_signed
3905                 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
3906         } else if messages_delivered == 4 {
3907                 // nodes[1] still wants its final RAA
3908                 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
3909         } else if messages_delivered == 5 {
3910                 // Everything was delivered...
3911                 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
3912         }
3913
3914         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
3915         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
3916         reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
3917
3918         // Channel should still work fine...
3919         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
3920         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(),
3921                 &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), Some(&nodes[0].node.list_usable_channels().iter().collect::<Vec<_>>()),
3922                 &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
3923         let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0;
3924         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
3925 }
3926
3927 #[test]
3928 fn test_drop_messages_peer_disconnect_a() {
3929         do_test_drop_messages_peer_disconnect(0, true);
3930         do_test_drop_messages_peer_disconnect(0, false);
3931         do_test_drop_messages_peer_disconnect(1, false);
3932         do_test_drop_messages_peer_disconnect(2, false);
3933 }
3934
3935 #[test]
3936 fn test_drop_messages_peer_disconnect_b() {
3937         do_test_drop_messages_peer_disconnect(3, false);
3938         do_test_drop_messages_peer_disconnect(4, false);
3939         do_test_drop_messages_peer_disconnect(5, false);
3940         do_test_drop_messages_peer_disconnect(6, false);
3941 }
3942
3943 #[test]
3944 fn test_funding_peer_disconnect() {
3945         // Test that we can lock in our funding tx while disconnected
3946         let chanmon_cfgs = create_chanmon_cfgs(2);
3947         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3948         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3949         let persister: test_utils::TestPersister;
3950         let new_chain_monitor: test_utils::TestChainMonitor;
3951         let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
3952         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3953         let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, InitFeatures::known(), InitFeatures::known());
3954
3955         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
3956         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
3957
3958         confirm_transaction(&nodes[0], &tx);
3959         let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
3960         assert_eq!(events_1.len(), 1);
3961         match events_1[0] {
3962                 MessageSendEvent::SendFundingLocked { ref node_id, msg: _ } => {
3963                         assert_eq!(*node_id, nodes[1].node.get_our_node_id());
3964                 },
3965                 _ => panic!("Unexpected event"),
3966         }
3967
3968         reconnect_nodes(&nodes[0], &nodes[1], (false, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
3969
3970         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
3971         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
3972
3973         confirm_transaction(&nodes[1], &tx);
3974         let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
3975         assert_eq!(events_2.len(), 2);
3976         let funding_locked = match events_2[0] {
3977                 MessageSendEvent::SendFundingLocked { ref node_id, ref msg } => {
3978                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
3979                         msg.clone()
3980                 },
3981                 _ => panic!("Unexpected event"),
3982         };
3983         let bs_announcement_sigs = match events_2[1] {
3984                 MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
3985                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
3986                         msg.clone()
3987                 },
3988                 _ => panic!("Unexpected event"),
3989         };
3990
3991         reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
3992
3993         nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &funding_locked);
3994         nodes[0].node.handle_announcement_signatures(&nodes[1].node.get_our_node_id(), &bs_announcement_sigs);
3995         let events_3 = nodes[0].node.get_and_clear_pending_msg_events();
3996         assert_eq!(events_3.len(), 2);
3997         let as_announcement_sigs = match events_3[0] {
3998                 MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
3999                         assert_eq!(*node_id, nodes[1].node.get_our_node_id());
4000                         msg.clone()
4001                 },
4002                 _ => panic!("Unexpected event"),
4003         };
4004         let (as_announcement, as_update) = match events_3[1] {
4005                 MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
4006                         (msg.clone(), update_msg.clone())
4007                 },
4008                 _ => panic!("Unexpected event"),
4009         };
4010
4011         nodes[1].node.handle_announcement_signatures(&nodes[0].node.get_our_node_id(), &as_announcement_sigs);
4012         let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
4013         assert_eq!(events_4.len(), 1);
4014         let (_, bs_update) = match events_4[0] {
4015                 MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
4016                         (msg.clone(), update_msg.clone())
4017                 },
4018                 _ => panic!("Unexpected event"),
4019         };
4020
4021         nodes[0].net_graph_msg_handler.handle_channel_announcement(&as_announcement).unwrap();
4022         nodes[0].net_graph_msg_handler.handle_channel_update(&bs_update).unwrap();
4023         nodes[0].net_graph_msg_handler.handle_channel_update(&as_update).unwrap();
4024
4025         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
4026         let logger = test_utils::TestLogger::new();
4027         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
4028         let (payment_preimage, _, _) = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000);
4029         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
4030
4031         // Check that after deserialization and reconnection we can still generate an identical
4032         // channel_announcement from the cached signatures.
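             // This follows the usual reload pattern: serialize the ChannelManager and its ChannelMonitor,
             // stand up a fresh persister and TestChainMonitor, deserialize via ChannelManagerReadArgs, and
             // re-register the monitor with watch_channel before reconnecting.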
4033         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
4034
4035         let nodes_0_serialized = nodes[0].node.encode();
4036         let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
4037         nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
4038
4039         persister = test_utils::TestPersister::new();
4040         let keys_manager = &chanmon_cfgs[0].keys_manager;
4041         new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), nodes[0].logger, node_cfgs[0].fee_estimator, &persister, keys_manager);
4042         nodes[0].chain_monitor = &new_chain_monitor;
4043         let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
4044         let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
4045                 &mut chan_0_monitor_read, keys_manager).unwrap();
4046         assert!(chan_0_monitor_read.is_empty());
4047
4048         let mut nodes_0_read = &nodes_0_serialized[..];
4049         let (_, nodes_0_deserialized_tmp) = {
4050                 let mut channel_monitors = HashMap::new();
4051                 channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
4052                 <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
4053                         default_config: UserConfig::default(),
4054                         keys_manager,
4055                         fee_estimator: node_cfgs[0].fee_estimator,
4056                         chain_monitor: nodes[0].chain_monitor,
4057                         tx_broadcaster: nodes[0].tx_broadcaster.clone(),
4058                         logger: nodes[0].logger,
4059                         channel_monitors,
4060                 }).unwrap()
4061         };
4062         nodes_0_deserialized = nodes_0_deserialized_tmp;
4063         assert!(nodes_0_read.is_empty());
4064
4065         assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
4066         nodes[0].node = &nodes_0_deserialized;
4067         check_added_monitors!(nodes[0], 1);
4068
4069         reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
4070
4071         // as_announcement should be re-generated exactly by broadcast_node_announcement.
4072         nodes[0].node.broadcast_node_announcement([0, 0, 0], [0; 32], Vec::new());
4073         let msgs = nodes[0].node.get_and_clear_pending_msg_events();
4074         let mut found_announcement = false;
4075         for event in msgs.iter() {
4076                 match event {
4077                         MessageSendEvent::BroadcastChannelAnnouncement { ref msg, .. } => {
4078                                 if *msg == as_announcement { found_announcement = true; }
4079                         },
4080                         MessageSendEvent::BroadcastNodeAnnouncement { .. } => {},
4081                         _ => panic!("Unexpected event"),
4082                 }
4083         }
4084         assert!(found_announcement);
4085 }
4086
4087 #[test]
4088 fn test_drop_messages_peer_disconnect_dual_htlc() {
4089         // Test that we can handle reconnecting when both sides of a channel have pending
4090         // commitment_updates when we disconnect.
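             // Concretely, nodes[0] will have an undelivered update_add_htlc + commitment_signed outstanding
             // and will have received nodes[1]'s update_fulfill_htlc + commitment_signed without delivering
             // its revoke_and_ack; on reestablish nodes[0] must retransmit both, commitment update first,
             // while nodes[1] has nothing to retransmit.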
4091         let chanmon_cfgs = create_chanmon_cfgs(2);
4092         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4093         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4094         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4095         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
4096         let logger = test_utils::TestLogger::new();
4097
4098         let (payment_preimage_1, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
4099
4100         // Now send a second payment, which will still be pending (undelivered) when we disconnect
4101         let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]);
4102         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
4103         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
4104         nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
4105         check_added_monitors!(nodes[0], 1);
4106
4107         let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
4108         assert_eq!(events_1.len(), 1);
4109         match events_1[0] {
4110                 MessageSendEvent::UpdateHTLCs { .. } => {},
4111                 _ => panic!("Unexpected event"),
4112         }
4113
4114         assert!(nodes[1].node.claim_funds(payment_preimage_1));
4115         check_added_monitors!(nodes[1], 1);
4116
4117         let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
4118         assert_eq!(events_2.len(), 1);
4119         match events_2[0] {
4120                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
4121                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
4122                         assert!(update_add_htlcs.is_empty());
4123                         assert_eq!(update_fulfill_htlcs.len(), 1);
4124                         assert!(update_fail_htlcs.is_empty());
4125                         assert!(update_fail_malformed_htlcs.is_empty());
4126                         assert!(update_fee.is_none());
4127
4128                         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
4129                         let events_3 = nodes[0].node.get_and_clear_pending_events();
4130                         assert_eq!(events_3.len(), 1);
4131                         match events_3[0] {
4132                                 Event::PaymentSent { ref payment_preimage } => {
4133                                         assert_eq!(*payment_preimage, payment_preimage_1);
4134                                 },
4135                                 _ => panic!("Unexpected event"),
4136                         }
4137
4138                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
4139                         let _ = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4140                         // No commitment_signed so get_event_msg's assert(len == 1) passes
4141                         check_added_monitors!(nodes[0], 1);
4142                 },
4143                 _ => panic!("Unexpected event"),
4144         }
4145
4146         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
4147         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
4148
4149         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
4150         let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
4151         assert_eq!(reestablish_1.len(), 1);
4152         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
4153         let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
4154         assert_eq!(reestablish_2.len(), 1);
4155
4156         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
4157         let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
4158         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
4159         let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
4160
4161         assert!(as_resp.0.is_none());
4162         assert!(bs_resp.0.is_none());
4163
4164         assert!(bs_resp.1.is_none());
4165         assert!(bs_resp.2.is_none());
4166
4167         assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
4168
4169         assert_eq!(as_resp.2.as_ref().unwrap().update_add_htlcs.len(), 1);
4170         assert!(as_resp.2.as_ref().unwrap().update_fulfill_htlcs.is_empty());
4171         assert!(as_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
4172         assert!(as_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
4173         assert!(as_resp.2.as_ref().unwrap().update_fee.is_none());
4174         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().update_add_htlcs[0]);
4175         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed);
4176         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4177         // No commitment_signed so get_event_msg's assert(len == 1) passes
4178         check_added_monitors!(nodes[1], 1);
4179
4180         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), as_resp.1.as_ref().unwrap());
4181         let bs_second_commitment_signed = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4182         assert!(bs_second_commitment_signed.update_add_htlcs.is_empty());
4183         assert!(bs_second_commitment_signed.update_fulfill_htlcs.is_empty());
4184         assert!(bs_second_commitment_signed.update_fail_htlcs.is_empty());
4185         assert!(bs_second_commitment_signed.update_fail_malformed_htlcs.is_empty());
4186         assert!(bs_second_commitment_signed.update_fee.is_none());
4187         check_added_monitors!(nodes[1], 1);
4188
4189         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
4190         let as_commitment_signed = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
4191         assert!(as_commitment_signed.update_add_htlcs.is_empty());
4192         assert!(as_commitment_signed.update_fulfill_htlcs.is_empty());
4193         assert!(as_commitment_signed.update_fail_htlcs.is_empty());
4194         assert!(as_commitment_signed.update_fail_malformed_htlcs.is_empty());
4195         assert!(as_commitment_signed.update_fee.is_none());
4196         check_added_monitors!(nodes[0], 1);
4197
4198         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_signed.commitment_signed);
4199         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4200         // No commitment_signed so get_event_msg's assert(len == 1) passes
4201         check_added_monitors!(nodes[0], 1);
4202
4203         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed);
4204         let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4205         // No commitment_signed so get_event_msg's assert(len == 1) passes
4206         check_added_monitors!(nodes[1], 1);
4207
4208         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
4209         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4210         check_added_monitors!(nodes[1], 1);
4211
4212         expect_pending_htlcs_forwardable!(nodes[1]);
4213
4214         let events_5 = nodes[1].node.get_and_clear_pending_events();
4215         assert_eq!(events_5.len(), 1);
4216         match events_5[0] {
4217                 Event::PaymentReceived { ref payment_hash, ref purpose, .. } => {
4218                         assert_eq!(payment_hash_2, *payment_hash);
4219                         match &purpose {
4220                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
4221                                         assert!(payment_preimage.is_none());
4222                                         assert_eq!(payment_secret_2, *payment_secret);
4223                                 },
4224                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
4225                         }
4226                 },
4227                 _ => panic!("Unexpected event"),
4228         }
4229
4230         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack);
4231         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4232         check_added_monitors!(nodes[0], 1);
4233
4234         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
4235 }
4236
4237 fn do_test_htlc_timeout(send_partial_mpp: bool) {
4238         // If the user fails to claim/fail an HTLC within the HTLC CLTV timeout we fail it for them
4239         // to avoid our counterparty failing the channel.
4240         let chanmon_cfgs = create_chanmon_cfgs(2);
4241         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4242         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4243         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4244
4245         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
4246         let logger = test_utils::TestLogger::new();
4247
4248         let our_payment_hash = if send_partial_mpp {
4249                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
4250                 let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100000, TEST_FINAL_CLTV, &logger).unwrap();
4251                 let (_, our_payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[1]);
4252                 // Use the utility function send_payment_along_path to send the payment with MPP data which
4253                 // indicates there are more HTLCs coming.
4254                 let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
4255                 nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200000, cur_height, &None).unwrap();
4256                 check_added_monitors!(nodes[0], 1);
4257                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
4258                 assert_eq!(events.len(), 1);
4259                 // Now do the relevant commitment_signed/RAA dances along the path, noting that the final
4260                 // hop should *not* yet generate any PaymentReceived event(s).
4261                 pass_along_path(&nodes[0], &[&nodes[1]], 100000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
4262                 our_payment_hash
4263         } else {
4264                 route_payment(&nodes[0], &[&nodes[1]], 100000).1
4265         };
4266
4267         let mut block = Block {
4268                 header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
4269                 txdata: vec![],
4270         };
4271         connect_block(&nodes[0], &block);
4272         connect_block(&nodes[1], &block);
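             // The HTLC was sent at height CHAN_CONFIRM_DEPTH + 1 with a TEST_FINAL_CLTV expiry delta, so we
             // connect blocks until nodes[1] is within CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS blocks
             // of that expiry; at that point it should fail the still-unclaimed HTLC back rather than let it
             // expire and risk its counterparty force-closing.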
4273         let block_count = TEST_FINAL_CLTV + CHAN_CONFIRM_DEPTH + 2 - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS;
4274         for _ in CHAN_CONFIRM_DEPTH + 2..block_count {
4275                 block.header.prev_blockhash = block.block_hash();
4276                 connect_block(&nodes[0], &block);
4277                 connect_block(&nodes[1], &block);
4278         }
4279
4280         expect_pending_htlcs_forwardable!(nodes[1]);
4281
4282         check_added_monitors!(nodes[1], 1);
4283         let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4284         assert!(htlc_timeout_updates.update_add_htlcs.is_empty());
4285         assert_eq!(htlc_timeout_updates.update_fail_htlcs.len(), 1);
4286         assert!(htlc_timeout_updates.update_fail_malformed_htlcs.is_empty());
4287         assert!(htlc_timeout_updates.update_fee.is_none());
4288
4289         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_timeout_updates.update_fail_htlcs[0]);
4290         commitment_signed_dance!(nodes[0], nodes[1], htlc_timeout_updates.commitment_signed, false);
4291         // 100_000 msat as u64, followed by the height at which we failed back above
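             // (0x4000 | 15 is BOLT 4's PERM | incorrect_or_unknown_payment_details, whose failure data is
             // the HTLC amount in msat followed by the current block height.)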
4292         let mut expected_failure_data = byte_utils::be64_to_array(100_000).to_vec();
4293         expected_failure_data.extend_from_slice(&byte_utils::be32_to_array(block_count - 1));
4294         expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000 | 15, &expected_failure_data[..]);
4295 }
4296
4297 #[test]
4298 fn test_htlc_timeout() {
4299         do_test_htlc_timeout(true);
4300         do_test_htlc_timeout(false);
4301 }
4302
4303 fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) {
4304         // Tests that HTLCs in the holding cell are timed out after the requisite number of blocks.
4305         let chanmon_cfgs = create_chanmon_cfgs(3);
4306         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
4307         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
4308         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
4309         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
4310         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
4311
4312         // Make sure all nodes are at the same starting height
4313         connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
4314         connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
4315         connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
4316
4317         let logger = test_utils::TestLogger::new();
4318
4319         // Route a first payment to get the 1 -> 2 channel in awaiting_raa...
4320         let (_, first_payment_hash, first_payment_secret) = get_payment_preimage_hash!(nodes[2]);
4321         {
4322                 let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
4323                 let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100000, TEST_FINAL_CLTV, &logger).unwrap();
4324                 nodes[1].node.send_payment(&route, first_payment_hash, &Some(first_payment_secret)).unwrap();
4325         }
4326         assert_eq!(nodes[1].node.get_and_clear_pending_msg_events().len(), 1);
4327         check_added_monitors!(nodes[1], 1);
4328
4329         // Now attempt to route a second payment, which should be placed in the holding cell
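             // (nodes[1] has a commitment_signed outstanding on the 1 -> 2 channel and is still awaiting
             // nodes[2]'s revoke_and_ack, so a new HTLC on that channel cannot be sent out yet.)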
4330         let (_, second_payment_hash, second_payment_secret) = get_payment_preimage_hash!(nodes[2]);
4331         if forwarded_htlc {
4332                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
4333                 let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100000, TEST_FINAL_CLTV, &logger).unwrap();
4334                 nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret)).unwrap();
4335                 check_added_monitors!(nodes[0], 1);
4336                 let payment_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
4337                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
4338                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
4339                 expect_pending_htlcs_forwardable!(nodes[1]);
4340                 check_added_monitors!(nodes[1], 0);
4341         } else {
4342                 let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
4343                 let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100000, TEST_FINAL_CLTV, &logger).unwrap();
4344                 nodes[1].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret)).unwrap();
4345                 check_added_monitors!(nodes[1], 0);
4346         }
4347
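             // The held HTLC should be failed once we are within CLTV_CLAIM_BUFFER +
             // LATENCY_GRACE_PERIOD_BLOCKS blocks of its expiry: one block short of that nothing happens, and
             // the next block triggers the failure.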
4348         connect_blocks(&nodes[1], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS);
4349         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4350         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
4351         connect_blocks(&nodes[1], 1);
4352
4353         if forwarded_htlc {
4354                 expect_pending_htlcs_forwardable!(nodes[1]);
4355                 check_added_monitors!(nodes[1], 1);
4356                 let fail_commit = nodes[1].node.get_and_clear_pending_msg_events();
4357                 assert_eq!(fail_commit.len(), 1);
4358                 match fail_commit[0] {
4359                         MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fail_htlcs, ref commitment_signed, .. }, .. } => {
4360                                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
4361                                 commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, true, true);
4362                         },
4363                         _ => unreachable!(),
4364                 }
4365                 expect_payment_failed!(nodes[0], second_payment_hash, false);
4366                 expect_payment_failure_chan_update!(nodes[0], chan_2.0.contents.short_channel_id, false);
4367         } else {
4368                 expect_payment_failed!(nodes[1], second_payment_hash, true);
4369         }
4370 }
4371
4372 #[test]
4373 fn test_holding_cell_htlc_add_timeouts() {
4374         do_test_holding_cell_htlc_add_timeouts(false);
4375         do_test_holding_cell_htlc_add_timeouts(true);
4376 }
4377
4378 #[test]
4379 fn test_invalid_channel_announcement() {
4380         // Test BOLT 7 channel_announcement msg requirements for the final node; gather data to build custom channel_announcement msgs
4381         let secp_ctx = Secp256k1::new();
4382         let chanmon_cfgs = create_chanmon_cfgs(2);
4383         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4384         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4385         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4386
4387         let chan_announcement = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
4388
4389         let a_channel_lock = nodes[0].node.channel_state.lock().unwrap();
4390         let b_channel_lock = nodes[1].node.channel_state.lock().unwrap();
4391         let as_chan = a_channel_lock.by_id.get(&chan_announcement.3).unwrap();
4392         let bs_chan = b_channel_lock.by_id.get(&chan_announcement.3).unwrap();
4393
4394         nodes[0].net_graph_msg_handler.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } );
4395
4396         let as_bitcoin_key = as_chan.get_signer().inner.holder_channel_pubkeys.funding_pubkey;
4397         let bs_bitcoin_key = bs_chan.get_signer().inner.holder_channel_pubkeys.funding_pubkey;
4398
4399         let as_network_key = nodes[0].node.get_our_node_id();
4400         let bs_network_key = nodes[1].node.get_our_node_id();
4401
4402         let were_node_one = as_bitcoin_key.serialize()[..] < bs_bitcoin_key.serialize()[..];
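             // The test keys the announcement ordering off the funding pubkeys: whichever side's key sorts lower
             // becomes node_id_1/bitcoin_key_1 in the messages built below.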
4403
4404         let mut chan_announcement;
4405
4406         macro_rules! dummy_unsigned_msg {
4407                 () => {
4408                         msgs::UnsignedChannelAnnouncement {
4409                                 features: ChannelFeatures::known(),
4410                                 chain_hash: genesis_block(Network::Testnet).header.block_hash(),
4411                                 short_channel_id: as_chan.get_short_channel_id().unwrap(),
4412                                 node_id_1: if were_node_one { as_network_key } else { bs_network_key },
4413                                 node_id_2: if were_node_one { bs_network_key } else { as_network_key },
4414                                 bitcoin_key_1: if were_node_one { as_bitcoin_key } else { bs_bitcoin_key },
4415                                 bitcoin_key_2: if were_node_one { bs_bitcoin_key } else { as_bitcoin_key },
4416                                 excess_data: Vec::new(),
4417                         };
4418                 }
4419         }
4420
4421         macro_rules! sign_msg {
4422                 ($unsigned_msg: expr) => {
4423                         let msghash = Message::from_slice(&Sha256dHash::hash(&$unsigned_msg.encode()[..])[..]).unwrap();
4424                         let as_bitcoin_sig = secp_ctx.sign(&msghash, &as_chan.get_signer().inner.funding_key);
4425                         let bs_bitcoin_sig = secp_ctx.sign(&msghash, &bs_chan.get_signer().inner.funding_key);
4426                         let as_node_sig = secp_ctx.sign(&msghash, &nodes[0].keys_manager.get_node_secret());
4427                         let bs_node_sig = secp_ctx.sign(&msghash, &nodes[1].keys_manager.get_node_secret());
4428                         chan_announcement = msgs::ChannelAnnouncement {
4429                                 node_signature_1 : if were_node_one { as_node_sig } else { bs_node_sig},
4430                                 node_signature_2 : if were_node_one { bs_node_sig } else { as_node_sig},
4431                                 bitcoin_signature_1: if were_node_one { as_bitcoin_sig } else { bs_bitcoin_sig },
4432                                 bitcoin_signature_2 : if were_node_one { bs_bitcoin_sig } else { as_bitcoin_sig },
4433                                 contents: $unsigned_msg
4434                         }
4435                 }
4436         }
4437
4438         let unsigned_msg = dummy_unsigned_msg!();
4439         sign_msg!(unsigned_msg);
4440         assert_eq!(nodes[0].net_graph_msg_handler.handle_channel_announcement(&chan_announcement).unwrap(), true);
4441         let _ = nodes[0].net_graph_msg_handler.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } );
4442
4443         // The nodes are configured with Network::Testnet, so an announcement using the mainnet chain_hash must be rejected
4444         let mut unsigned_msg = dummy_unsigned_msg!();
4445         unsigned_msg.chain_hash = genesis_block(Network::Bitcoin).header.block_hash();
4446         sign_msg!(unsigned_msg);
4447         assert!(nodes[0].net_graph_msg_handler.handle_channel_announcement(&chan_announcement).is_err());
4448
4449         let mut unsigned_msg = dummy_unsigned_msg!();
4450         unsigned_msg.chain_hash = BlockHash::hash(&[1,2,3,4,5,6,7,8,9]);
4451         sign_msg!(unsigned_msg);
4452         assert!(nodes[0].net_graph_msg_handler.handle_channel_announcement(&chan_announcement).is_err());
4453 }
4454
4455 #[test]
4456 fn test_no_txn_manager_serialize_deserialize() {
4457         let chanmon_cfgs = create_chanmon_cfgs(2);
4458         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4459         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4460         let logger: test_utils::TestLogger;
4461         let fee_estimator: test_utils::TestFeeEstimator;
4462         let persister: test_utils::TestPersister;
4463         let new_chain_monitor: test_utils::TestChainMonitor;
4464         let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
4465         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4466
4467         let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, InitFeatures::known(), InitFeatures::known());
4468
4469         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
4470
4471         let nodes_0_serialized = nodes[0].node.encode();
4472         let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
4473         nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
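             // The ChannelMonitor is serialized separately from the ChannelManager; both copies are read back
             // below and handed to a fresh TestChainMonitor to simulate a restart from persisted state.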
4474
4475         logger = test_utils::TestLogger::new();
4476         fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
4477         persister = test_utils::TestPersister::new();
4478         let keys_manager = &chanmon_cfgs[0].keys_manager;
4479         new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator, &persister, keys_manager);
4480         nodes[0].chain_monitor = &new_chain_monitor;
4481         let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
4482         let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
4483                 &mut chan_0_monitor_read, keys_manager).unwrap();
4484         assert!(chan_0_monitor_read.is_empty());
4485
4486         let mut nodes_0_read = &nodes_0_serialized[..];
4487         let config = UserConfig::default();
4488         let (_, nodes_0_deserialized_tmp) = {
4489                 let mut channel_monitors = HashMap::new();
4490                 channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
4491                 <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
4492                         default_config: config,
4493                         keys_manager,
4494                         fee_estimator: &fee_estimator,
4495                         chain_monitor: nodes[0].chain_monitor,
4496                         tx_broadcaster: nodes[0].tx_broadcaster.clone(),
4497                         logger: &logger,
4498                         channel_monitors,
4499                 }).unwrap()
4500         };
4501         nodes_0_deserialized = nodes_0_deserialized_tmp;
4502         assert!(nodes_0_read.is_empty());
4503
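             // Re-register the deserialized monitor with the new chain monitor and swap in the deserialized
             // ChannelManager, as a restarting node would.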
4504         assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
4505         nodes[0].node = &nodes_0_deserialized;
4506         assert_eq!(nodes[0].node.list_channels().len(), 1);
4507         check_added_monitors!(nodes[0], 1);
4508
4509         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
4510         let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
4511         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
4512         let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
4513
4514         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
4515         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4516         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
4517         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4518
4519         let (funding_locked, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
4520         let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked);
4521         for node in nodes.iter() {
4522                 assert!(node.net_graph_msg_handler.handle_channel_announcement(&announcement).unwrap());
4523                 node.net_graph_msg_handler.handle_channel_update(&as_update).unwrap();
4524                 node.net_graph_msg_handler.handle_channel_update(&bs_update).unwrap();
4525         }
4526
4527         send_payment(&nodes[0], &[&nodes[1]], 1000000);
4528 }
4529
4530 #[test]
4531 fn test_dup_htlc_onchain_fails_on_reload() {
4532         // When a Channel is closed, any outbound HTLCs which were relayed through it are simply
4533         // dropped when the Channel is. From there, the ChannelManager relies on the ChannelMonitor
4534         // having a copy of the relevant fail-/claim-back data and processes the HTLC fail/claim when
4535         // the ChannelMonitor tells it to.
4536         //
4537         // If, due to an on-chain event, an HTLC is failed/claimed, and then we serialize the
4538         // ChannelManager, we generally expect there not to be a duplicate HTLC fail/claim (eg via a
4539         // PaymentFailed event appearing). However, because we may not serialize the relevant
4540         // ChannelMonitor at the same time, this isn't strictly guaranteed. In order to provide this
4541         // consistency, the ChannelManager explicitly tracks pending-onchain-resolution outbound HTLCs
4542         // and de-duplicates ChannelMonitor events.
4543         //
4544         // This tests that explicit tracking behavior.
4545         let chanmon_cfgs = create_chanmon_cfgs(2);
4546         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4547         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4548         let persister: test_utils::TestPersister;
4549         let new_chain_monitor: test_utils::TestChainMonitor;
4550         let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
4551         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4552
4553         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
4554
4555         // Route a payment, but force-close the channel before the HTLC fulfill message arrives at
4556         // nodes[0].
4557         let (payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1]], 10000000);
4558         nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
4559         check_closed_broadcast!(nodes[0], true);
4560         check_added_monitors!(nodes[0], 1);
4561
4562         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
4563         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
4564
4565         // Connect blocks until the CLTV timeout is up so that we get an HTLC-Timeout transaction
4566         connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
4567         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
4568         assert_eq!(node_txn.len(), 3);
4569         assert_eq!(node_txn[0], node_txn[1]);
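             // node_txn should contain the commitment transaction (broadcast once on force-close and again by the
             // ChannelMonitor once the HTLC timed out) plus the HTLC-Timeout transaction spending it.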
4570
4571         assert!(nodes[1].node.claim_funds(payment_preimage));
4572         check_added_monitors!(nodes[1], 1);
4573
4574         let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
4575         connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone(), node_txn[2].clone()]});
4576         check_closed_broadcast!(nodes[1], true);
4577         check_added_monitors!(nodes[1], 1);
4578         let claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
4579
4580         header.prev_blockhash = nodes[0].best_block_hash();
4581         connect_block(&nodes[0], &Block { header, txdata: vec![node_txn[1].clone(), node_txn[2].clone()]});
4582
4583         // Serialize out the ChannelMonitor before connecting the on-chain claim transactions. This is
4584         // fairly normal behavior as ChannelMonitor(s) are often not re-serialized when on-chain events
4585         // happen, unlike ChannelManager which tends to be re-serialized after any relevant event(s).
4586         let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
4587         nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
4588
4589         header.prev_blockhash = nodes[0].best_block_hash();
4590         let claim_block = Block { header, txdata: claim_txn};
4591         connect_block(&nodes[0], &claim_block);
4592         expect_payment_sent!(nodes[0], payment_preimage);
4593
4594         // ChannelManagers generally get re-serialized after any relevant event(s). Since we just
4595         // connected a highly-relevant block, it likely gets serialized out now.
4596         let mut chan_manager_serialized = test_utils::TestVecWriter(Vec::new());
4597         nodes[0].node.write(&mut chan_manager_serialized).unwrap();
4598
4599         // Now reload nodes[0]...
4600         persister = test_utils::TestPersister::new();
4601         let keys_manager = &chanmon_cfgs[0].keys_manager;
4602         new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), nodes[0].logger, node_cfgs[0].fee_estimator, &persister, keys_manager);
4603         nodes[0].chain_monitor = &new_chain_monitor;
4604         let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
4605         let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
4606                 &mut chan_0_monitor_read, keys_manager).unwrap();
4607         assert!(chan_0_monitor_read.is_empty());
4608
4609         let (_, nodes_0_deserialized_tmp) = {
4610                 let mut channel_monitors = HashMap::new();
4611                 channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
4612                 <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>
4613                         ::read(&mut io::Cursor::new(&chan_manager_serialized.0[..]), ChannelManagerReadArgs {
4614                                 default_config: Default::default(),
4615                                 keys_manager,
4616                                 fee_estimator: node_cfgs[0].fee_estimator,
4617                                 chain_monitor: nodes[0].chain_monitor,
4618                                 tx_broadcaster: nodes[0].tx_broadcaster.clone(),
4619                                 logger: nodes[0].logger,
4620                                 channel_monitors,
4621                         }).unwrap()
4622         };
4623         nodes_0_deserialized = nodes_0_deserialized_tmp;
4624
4625         assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
4626         check_added_monitors!(nodes[0], 1);
4627         nodes[0].node = &nodes_0_deserialized;
4628
4629         // Note that if we re-connect the block which exposed nodes[0] to the payment preimage (but
4630         // which the current ChannelMonitor has not seen), the ChannelManager's de-duplication of
4631         // payment events should kick in, leaving us with no pending events here.
4632         let height = nodes[0].blocks.lock().unwrap().len() as u32 - 1;
4633         nodes[0].chain_monitor.chain_monitor.block_connected(&claim_block, height);
4634         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
4635 }
4636
4637 #[test]
4638 fn test_manager_serialize_deserialize_events() {
4639         // This test makes sure the events field in ChannelManager survives de/serialization
4640         let chanmon_cfgs = create_chanmon_cfgs(2);
4641         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4642         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4643         let fee_estimator: test_utils::TestFeeEstimator;
4644         let persister: test_utils::TestPersister;
4645         let logger: test_utils::TestLogger;
4646         let new_chain_monitor: test_utils::TestChainMonitor;
4647         let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
4648         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4649
4650         // Start creating a channel, but stop right before broadcasting the funding transaction
4651         let channel_value = 100000;
4652         let push_msat = 10001;
4653         let a_flags = InitFeatures::known();
4654         let b_flags = InitFeatures::known();
4655         let node_a = nodes.remove(0);
4656         let node_b = nodes.remove(0);
4657         node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42, None).unwrap();
4658         node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), a_flags, &get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id()));
4659         node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), b_flags, &get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a.node.get_our_node_id()));
4660
4661         let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&node_a, channel_value, 42);
4662
4663         node_a.node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
4664         check_added_monitors!(node_a, 0);
4665
4666         node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id()));
4667         {
4668                 let mut added_monitors = node_b.chain_monitor.added_monitors.lock().unwrap();
4669                 assert_eq!(added_monitors.len(), 1);
4670                 assert_eq!(added_monitors[0].0, funding_output);
4671                 added_monitors.clear();
4672         }
4673
4674         node_a.node.handle_funding_signed(&node_b.node.get_our_node_id(), &get_event_msg!(node_b, MessageSendEvent::SendFundingSigned, node_a.node.get_our_node_id()));
4675         {
4676                 let mut added_monitors = node_a.chain_monitor.added_monitors.lock().unwrap();
4677                 assert_eq!(added_monitors.len(), 1);
4678                 assert_eq!(added_monitors[0].0, funding_output);
4679                 added_monitors.clear();
4680         }
4681         // Normally, this is where node_a would broadcast the funding transaction, but the test de/serializes first instead
4682
4683         nodes.push(node_a);
4684         nodes.push(node_b);
4685
4686         // Start the de/serialization process mid-channel creation to check that the channel manager will hold onto events that are serialized
4687         let nodes_0_serialized = nodes[0].node.encode();
4688         let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
4689         nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
4690
4691         fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
4692         logger = test_utils::TestLogger::new();
4693         persister = test_utils::TestPersister::new();
4694         let keys_manager = &chanmon_cfgs[0].keys_manager;
4695         new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator, &persister, keys_manager);
4696         nodes[0].chain_monitor = &new_chain_monitor;
4697         let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
4698         let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
4699                 &mut chan_0_monitor_read, keys_manager).unwrap();
4700         assert!(chan_0_monitor_read.is_empty());
4701
4702         let mut nodes_0_read = &nodes_0_serialized[..];
4703         let config = UserConfig::default();
4704         let (_, nodes_0_deserialized_tmp) = {
4705                 let mut channel_monitors = HashMap::new();
4706                 channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
4707                 <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
4708                         default_config: config,
4709                         keys_manager,
4710                         fee_estimator: &fee_estimator,
4711                         chain_monitor: nodes[0].chain_monitor,
4712                         tx_broadcaster: nodes[0].tx_broadcaster.clone(),
4713                         logger: &logger,
4714                         channel_monitors,
4715                 }).unwrap()
4716         };
4717         nodes_0_deserialized = nodes_0_deserialized_tmp;
4718         assert!(nodes_0_read.is_empty());
4719
4720         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
4721
4722         assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
4723         nodes[0].node = &nodes_0_deserialized;
4724
4725         // After deserializing, make sure the funding_transaction is still held by the channel manager
4726         let events_4 = nodes[0].node.get_and_clear_pending_events();
4727         assert_eq!(events_4.len(), 0);
4728         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
4729         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].txid(), funding_output.txid);
4730
4731         // Make sure the channel is functioning as though the de/serialization never happened
4732         assert_eq!(nodes[0].node.list_channels().len(), 1);
4733         check_added_monitors!(nodes[0], 1);
4734
4735         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
4736         let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
4737         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
4738         let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
4739
4740         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
4741         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4742         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
4743         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4744
4745         let (funding_locked, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
4746         let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked);
4747         for node in nodes.iter() {
4748                 assert!(node.net_graph_msg_handler.handle_channel_announcement(&announcement).unwrap());
4749                 node.net_graph_msg_handler.handle_channel_update(&as_update).unwrap();
4750                 node.net_graph_msg_handler.handle_channel_update(&bs_update).unwrap();
4751         }
4752
4753         send_payment(&nodes[0], &[&nodes[1]], 1000000);
4754 }
4755
4756 #[test]
4757 fn test_simple_manager_serialize_deserialize() {
4758         let chanmon_cfgs = create_chanmon_cfgs(2);
4759         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4760         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4761         let logger: test_utils::TestLogger;
4762         let fee_estimator: test_utils::TestFeeEstimator;
4763         let persister: test_utils::TestPersister;
4764         let new_chain_monitor: test_utils::TestChainMonitor;
4765         let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
4766         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4767         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
4768
4769         let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
4770         let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
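             // Route two payments before the reload: one to fail and one to claim afterwards, checking that
             // pending HTLCs survive de/serialization.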
4771
4772         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
4773
4774         let nodes_0_serialized = nodes[0].node.encode();
4775         let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
4776         nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
4777
4778         logger = test_utils::TestLogger::new();
4779         fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
4780         persister = test_utils::TestPersister::new();
4781         let keys_manager = &chanmon_cfgs[0].keys_manager;
4782         new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator, &persister, keys_manager);
4783         nodes[0].chain_monitor = &new_chain_monitor;
4784         let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
4785         let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
4786                 &mut chan_0_monitor_read, keys_manager).unwrap();
4787         assert!(chan_0_monitor_read.is_empty());
4788
4789         let mut nodes_0_read = &nodes_0_serialized[..];
4790         let (_, nodes_0_deserialized_tmp) = {
4791                 let mut channel_monitors = HashMap::new();
4792                 channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
4793                 <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
4794                         default_config: UserConfig::default(),
4795                         keys_manager,
4796                         fee_estimator: &fee_estimator,
4797                         chain_monitor: nodes[0].chain_monitor,
4798                         tx_broadcaster: nodes[0].tx_broadcaster.clone(),
4799                         logger: &logger,
4800                         channel_monitors,
4801                 }).unwrap()
4802         };
4803         nodes_0_deserialized = nodes_0_deserialized_tmp;
4804         assert!(nodes_0_read.is_empty());
4805
4806         assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
4807         nodes[0].node = &nodes_0_deserialized;
4808         check_added_monitors!(nodes[0], 1);
4809
4810         reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
4811
4812         fail_payment(&nodes[0], &[&nodes[1]], our_payment_hash);
4813         claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
4814 }
4815
4816 #[test]
4817 fn test_manager_serialize_deserialize_inconsistent_monitor() {
4818         // Test deserializing a ChannelManager with an out-of-date ChannelMonitor
4819         let chanmon_cfgs = create_chanmon_cfgs(4);
4820         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
4821         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
4822         let logger: test_utils::TestLogger;
4823         let fee_estimator: test_utils::TestFeeEstimator;
4824         let persister: test_utils::TestPersister;
4825         let new_chain_monitor: test_utils::TestChainMonitor;
4826         let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
4827         let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
4828         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
4829         create_announced_chan_between_nodes(&nodes, 2, 0, InitFeatures::known(), InitFeatures::known());
4830         let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 3, InitFeatures::known(), InitFeatures::known());
4831
4832         let mut node_0_stale_monitors_serialized = Vec::new();
4833         for monitor in nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter() {
4834                 let mut writer = test_utils::TestVecWriter(Vec::new());
4835                 monitor.1.write(&mut writer).unwrap();
4836                 node_0_stale_monitors_serialized.push(writer.0);
4837         }
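             // These serialized monitors are "stale": they predate the payments routed below, and attempting to
             // deserialize the ChannelManager with them later must fail.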
4838
4839         let (our_payment_preimage, _, _) = route_payment(&nodes[2], &[&nodes[0], &nodes[1]], 1000000);
4840
4841         // Serialize the ChannelManager here, but keep the monitors up-to-date (they are re-serialized below, after another payment)
4842         let nodes_0_serialized = nodes[0].node.encode();
4843
4844         route_payment(&nodes[0], &[&nodes[3]], 1000000);
4845         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
4846         nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
4847         nodes[3].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
4848
4849         // Now serialize the ChannelMonitors (which are now out-of-sync with the ChannelManager for the
4850         // channel w/ nodes[3])
4851         let mut node_0_monitors_serialized = Vec::new();
4852         for monitor in nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter() {
4853                 let mut writer = test_utils::TestVecWriter(Vec::new());
4854                 monitor.1.write(&mut writer).unwrap();
4855                 node_0_monitors_serialized.push(writer.0);
4856         }
4857
4858         logger = test_utils::TestLogger::new();
4859         fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
4860         persister = test_utils::TestPersister::new();
4861         let keys_manager = &chanmon_cfgs[0].keys_manager;
4862         new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator, &persister, keys_manager);
4863         nodes[0].chain_monitor = &new_chain_monitor;
4864
4865
4866         let mut node_0_stale_monitors = Vec::new();
4867         for serialized in node_0_stale_monitors_serialized.iter() {
4868                 let mut read = &serialized[..];
4869                 let (_, monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut read, keys_manager).unwrap();
4870                 assert!(read.is_empty());
4871                 node_0_stale_monitors.push(monitor);
4872         }
4873
4874         let mut node_0_monitors = Vec::new();
4875         for serialized in node_0_monitors_serialized.iter() {
4876                 let mut read = &serialized[..];
4877                 let (_, monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut read, keys_manager).unwrap();
4878                 assert!(read.is_empty());
4879                 node_0_monitors.push(monitor);
4880         }
4881
4882         let mut nodes_0_read = &nodes_0_serialized[..];
4883         if let Err(msgs::DecodeError::InvalidValue) =
4884                 <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
4885                 default_config: UserConfig::default(),
4886                 keys_manager,
4887                 fee_estimator: &fee_estimator,
4888                 chain_monitor: nodes[0].chain_monitor,
4889                 tx_broadcaster: nodes[0].tx_broadcaster.clone(),
4890                 logger: &logger,
4891                 channel_monitors: node_0_stale_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
4892         }) { } else {
4893                 panic!("If the monitor(s) are stale, this indicates a bug and we should get an Err return");
4894         };
4895
4896         let mut nodes_0_read = &nodes_0_serialized[..];
4897         let (_, nodes_0_deserialized_tmp) =
4898                 <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
4899                 default_config: UserConfig::default(),
4900                 keys_manager,
4901                 fee_estimator: &fee_estimator,
4902                 chain_monitor: nodes[0].chain_monitor,
4903                 tx_broadcaster: nodes[0].tx_broadcaster.clone(),
4904                 logger: &logger,
4905                 channel_monitors: node_0_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
4906         }).unwrap();
4907         nodes_0_deserialized = nodes_0_deserialized_tmp;
4908         assert!(nodes_0_read.is_empty());
4909
4910         { // Channel close should result in a commitment tx
4911                 let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
4912                 assert_eq!(txn.len(), 1);
4913                 check_spends!(txn[0], funding_tx);
4914                 assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid());
4915         }
4916
4917         for monitor in node_0_monitors.drain(..) {
4918                 assert!(nodes[0].chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor).is_ok());
4919                 check_added_monitors!(nodes[0], 1);
4920         }
4921         nodes[0].node = &nodes_0_deserialized;
4922
4923         // nodes[1] and nodes[2] have no lost state with nodes[0]...
4924         reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
4925         reconnect_nodes(&nodes[0], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
4926         //... and we can even still claim the payment!
4927         claim_payment(&nodes[2], &[&nodes[0], &nodes[1]], our_payment_preimage);
4928
4929         nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
4930         let reestablish = get_event_msg!(nodes[3], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
4931         nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
4932         nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish);
4933         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
4934         assert_eq!(msg_events.len(), 1);
4935         if let MessageSendEvent::HandleError { ref action, .. } = msg_events[0] {
4936                 match action {
4937                         &ErrorAction::SendErrorMessage { ref msg } => {
4938                                 assert_eq!(msg.channel_id, channel_id);
4939                         },
4940                         _ => panic!("Unexpected event!"),
4941                 }
4942         }
4943 }
4944
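     // Drains pending SpendableOutputs events from the node's ChainMonitor, spends each output (and, when
     // there is more than one, all of them together) to an OP_RETURN script via the test KeysInterface, and
     // returns the resulting transactions for inspection.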
4945 macro_rules! check_spendable_outputs {
4946         ($node: expr, $keysinterface: expr) => {
4947                 {
4948                         let mut events = $node.chain_monitor.chain_monitor.get_and_clear_pending_events();
4949                         let mut txn = Vec::new();
4950                         let mut all_outputs = Vec::new();
4951                         let secp_ctx = Secp256k1::new();
4952                         for event in events.drain(..) {
4953                                 match event {
4954                                         Event::SpendableOutputs { mut outputs } => {
4955                                                 for outp in outputs.drain(..) {
4956                                                         txn.push($keysinterface.backing.spend_spendable_outputs(&[&outp], Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &secp_ctx).unwrap());
4957                                                         all_outputs.push(outp);
4958                                                 }
4959                                         },
4960                                         _ => panic!("Unexpected event"),
4961                                 };
4962                         }
4963                         if all_outputs.len() > 1 {
4964                                 if let Ok(tx) = $keysinterface.backing.spend_spendable_outputs(&all_outputs.iter().map(|a| a).collect::<Vec<_>>(), Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &secp_ctx) {
4965                                         txn.push(tx);
4966                                 }
4967                         }
4968                         txn
4969                 }
4970         }
4971 }
4972
4973 #[test]
4974 fn test_claim_sizeable_push_msat() {
4975         // Incidentally test SpendableOutput event generation due to detection of to_local output on commitment tx
4976         let chanmon_cfgs = create_chanmon_cfgs(2);
4977         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4978         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4979         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4980
4981         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, InitFeatures::known(), InitFeatures::known());
4982         nodes[1].node.force_close_channel(&chan.2).unwrap();
4983         check_closed_broadcast!(nodes[1], true);
4984         check_added_monitors!(nodes[1], 1);
4985         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
4986         assert_eq!(node_txn.len(), 1);
4987         check_spends!(node_txn[0], chan.3);
4988         assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of to_remote output as channel_reserve_satoshis blocks us from doing so at channel opening
4989
4990         mine_transaction(&nodes[1], &node_txn[0]);
4991         connect_blocks(&nodes[1], BREAKDOWN_TIMEOUT as u32 - 1);
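             // Our own to_local output is only claimable after the to_self_delay (BREAKDOWN_TIMEOUT here), hence
             // the nSequence check on the spending input below.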
4992
4993         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4994         assert_eq!(spend_txn.len(), 1);
4995         assert_eq!(spend_txn[0].input.len(), 1);
4996         check_spends!(spend_txn[0], node_txn[0]);
4997         assert_eq!(spend_txn[0].input[0].sequence, BREAKDOWN_TIMEOUT as u32);
4998 }
4999
5000 #[test]
5001 fn test_claim_on_remote_sizeable_push_msat() {
5002         // Same test as the previous one, just on the remote commitment tx, as per_commitment_point registration changes depending on whether you're the funder or fundee and the
5003         // to_remote output is encumbered by a P2WPKH
5004         let chanmon_cfgs = create_chanmon_cfgs(2);
5005         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5006         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5007         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5008
5009         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, InitFeatures::known(), InitFeatures::known());
5010         nodes[0].node.force_close_channel(&chan.2).unwrap();
5011         check_closed_broadcast!(nodes[0], true);
5012         check_added_monitors!(nodes[0], 1);
5013
5014         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
5015         assert_eq!(node_txn.len(), 1);
5016         check_spends!(node_txn[0], chan.3);
5017         assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of to_remote output as channel_reserve_satoshis blocks us from doing so at channel opening
5018
5019         mine_transaction(&nodes[1], &node_txn[0]);
5020         check_closed_broadcast!(nodes[1], true);
5021         check_added_monitors!(nodes[1], 1);
5022         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
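             // The to_remote output on the counterparty's commitment tx becomes a SpendableOutput once the
             // monitor has seen ANTI_REORG_DELAY confirmations.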
5023
5024         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
5025         assert_eq!(spend_txn.len(), 1);
5026         check_spends!(spend_txn[0], node_txn[0]);
5027 }
5028
5029 #[test]
5030 fn test_claim_on_remote_revoked_sizeable_push_msat() {
5031         // Same test as the previous one, just on the remote revoked commitment tx, as per_commitment_point registration changes depending on whether you're the funder or fundee and the
5032         // to_remote output is encumbered by a P2WPKH
5033
5034         let chanmon_cfgs = create_chanmon_cfgs(2);
5035         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5036         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5037         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5038
5039         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 59000000, InitFeatures::known(), InitFeatures::known());
5040         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
5041         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan.2);
5042         assert_eq!(revoked_local_txn[0].input.len(), 1);
5043         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
5044
5045         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
5046         mine_transaction(&nodes[1], &revoked_local_txn[0]);
5047         check_closed_broadcast!(nodes[1], true);
5048         check_added_monitors!(nodes[1], 1);
5049
5050         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
5051         mine_transaction(&nodes[1], &node_txn[0]);
5052         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
5053
5054         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
5055         assert_eq!(spend_txn.len(), 3);
5056         check_spends!(spend_txn[0], revoked_local_txn[0]); // to_remote output on revoked remote commitment_tx
5057         check_spends!(spend_txn[1], node_txn[0]);
5058         check_spends!(spend_txn[2], revoked_local_txn[0], node_txn[0]); // Both outputs
5059 }
5060
5061 #[test]
5062 fn test_static_spendable_outputs_preimage_tx() {
5063         let chanmon_cfgs = create_chanmon_cfgs(2);
5064         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5065         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5066         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5067
5068         // Create some initial channels
5069         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
5070
5071         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
5072
5073         let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
5074         assert_eq!(commitment_tx[0].input.len(), 1);
5075         assert_eq!(commitment_tx[0].input[0].previous_output.txid, chan_1.3.txid());
5076
5077         // Settle A's commitment tx on B's chain
5078         assert!(nodes[1].node.claim_funds(payment_preimage));
5079         check_added_monitors!(nodes[1], 1);
5080         mine_transaction(&nodes[1], &commitment_tx[0]);
5081         check_added_monitors!(nodes[1], 1);
5082         let events = nodes[1].node.get_and_clear_pending_msg_events();
5083         match events[0] {
5084                 MessageSendEvent::UpdateHTLCs { .. } => {},
5085                 _ => panic!("Unexpected event"),
5086         }
5087         match events[1] {
5088                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
5089                 _ => panic!("Unexpected event"),
5090         }
5091
5092         // Check B's monitor was able to send back output descriptor event for preimage tx on A's commitment tx
5093         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); // ChannelManager : 2 (local commitment tx + HTLC-Success), ChannelMonitor: preimage tx
5094         assert_eq!(node_txn.len(), 3);
5095         check_spends!(node_txn[0], commitment_tx[0]);
5096         assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
5097         check_spends!(node_txn[1], chan_1.3);
5098         check_spends!(node_txn[2], node_txn[1]);
5099
5100         mine_transaction(&nodes[1], &node_txn[0]);
5101         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
5102
5103         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
5104         assert_eq!(spend_txn.len(), 1);
5105         check_spends!(spend_txn[0], node_txn[0]);
5106 }
5107
5108 #[test]
5109 fn test_static_spendable_outputs_timeout_tx() {
5110         let chanmon_cfgs = create_chanmon_cfgs(2);
5111         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5112         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5113         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5114
5115         // Create some initial channels
5116         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
5117
5118         // Rebalance the network a bit by relaying one payment through the channel ...
5119         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
5120
5121         let (_, our_payment_hash, _) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000);
5122
5123         let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
5124         assert_eq!(commitment_tx[0].input.len(), 1);
5125         assert_eq!(commitment_tx[0].input[0].previous_output.txid, chan_1.3.txid());
5126
5127         // Settle A's commitment tx on B's chain
5128         mine_transaction(&nodes[1], &commitment_tx[0]);
5129         check_added_monitors!(nodes[1], 1);
5130         let events = nodes[1].node.get_and_clear_pending_msg_events();
5131         match events[0] {
5132                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
5133                 _ => panic!("Unexpected event"),
5134         }
5135         connect_blocks(&nodes[1], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
5136
5137         // Check B's monitor was able to send back output descriptor event for timeout tx on A's commitment tx
5138         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
5139         assert_eq!(node_txn.len(), 2); // ChannelManager : 1 local commitment tx, ChannelMonitor: timeout tx
5140         check_spends!(node_txn[0], chan_1.3.clone());
5141         check_spends!(node_txn[1],  commitment_tx[0].clone());
5142         assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5143
5144         mine_transaction(&nodes[1], &node_txn[1]);
5145         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
5146         expect_payment_failed!(nodes[1], our_payment_hash, true);
5147
5148         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
5149         assert_eq!(spend_txn.len(), 3); // SpendableOutput: remote_commitment_tx.to_remote, timeout_tx.output, and a tx spending both
5150         check_spends!(spend_txn[0], commitment_tx[0]);
5151         check_spends!(spend_txn[1], node_txn[1]);
5152         check_spends!(spend_txn[2], node_txn[1], commitment_tx[0]); // All outputs
5153 }
5154
5155 #[test]
5156 fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
5157         let chanmon_cfgs = create_chanmon_cfgs(2);
5158         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5159         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5160         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5161
5162         // Create some initial channels
5163         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
5164
5165         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
5166         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
5167         assert_eq!(revoked_local_txn[0].input.len(), 1);
5168         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
5169
5170         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
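             // Claiming the payment advances the channel state, turning the commitment tx captured above into a
             // revoked one; once it appears on-chain, B can build a justice tx claiming its outputs.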
5171
5172         mine_transaction(&nodes[1], &revoked_local_txn[0]);
5173         check_closed_broadcast!(nodes[1], true);
5174         check_added_monitors!(nodes[1], 1);
5175
5176         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
5177         assert_eq!(node_txn.len(), 2);
5178         assert_eq!(node_txn[0].input.len(), 2);
5179         check_spends!(node_txn[0], revoked_local_txn[0]);
5180
5181         mine_transaction(&nodes[1], &node_txn[0]);
5182         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
5183
5184         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
5185         assert_eq!(spend_txn.len(), 1);
5186         check_spends!(spend_txn[0], node_txn[0]);
5187 }
5188
5189 #[test]
5190 fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
5191         let mut chanmon_cfgs = create_chanmon_cfgs(2);
5192         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
5193         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5194         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5195         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5196
5197         // Create some initial channels
5198         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
5199
5200         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
5201         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
5202         assert_eq!(revoked_local_txn[0].input.len(), 1);
5203         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
5204
5205         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
5206
5207         // A will generate HTLC-Timeout from revoked commitment tx
5208         mine_transaction(&nodes[0], &revoked_local_txn[0]);
5209         check_closed_broadcast!(nodes[0], true);
5210         check_added_monitors!(nodes[0], 1);
5211         connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
5212
5213         let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
5214         assert_eq!(revoked_htlc_txn.len(), 2);
5215         check_spends!(revoked_htlc_txn[0], chan_1.3);
5216         assert_eq!(revoked_htlc_txn[1].input.len(), 1);
5217         assert_eq!(revoked_htlc_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
5218         check_spends!(revoked_htlc_txn[1], revoked_local_txn[0]);
5219         assert_ne!(revoked_htlc_txn[1].lock_time, 0); // HTLC-Timeout
5220
5221         // B will generate justice tx from A's revoked commitment/HTLC tx
5222         let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
5223         connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[1].clone()] });
5224         check_closed_broadcast!(nodes[1], true);
5225         check_added_monitors!(nodes[1], 1);
5226
5227         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
5228         assert_eq!(node_txn.len(), 3); // ChannelMonitor: bogus justice tx, justice tx on revoked outputs, ChannelManager: local commitment tx
5229         // The first transaction generated is bogus - it spends both outputs of revoked_local_txn[0]
5230         // including the one already spent by revoked_htlc_txn[1]. That's OK, we'll spend with valid
5231         // transactions next...
5232         assert_eq!(node_txn[0].input.len(), 3);
5233         check_spends!(node_txn[0], revoked_local_txn[0], revoked_htlc_txn[1]);
5234
5235         assert_eq!(node_txn[1].input.len(), 2);
5236         check_spends!(node_txn[1], revoked_local_txn[0], revoked_htlc_txn[1]);
5237         if node_txn[1].input[1].previous_output.txid == revoked_htlc_txn[1].txid() {
5238                 assert_ne!(node_txn[1].input[0].previous_output, revoked_htlc_txn[1].input[0].previous_output);
5239         } else {
5240                 assert_eq!(node_txn[1].input[0].previous_output.txid, revoked_htlc_txn[1].txid());
5241                 assert_ne!(node_txn[1].input[1].previous_output, revoked_htlc_txn[1].input[0].previous_output);
5242         }
5243
5244         assert_eq!(node_txn[2].input.len(), 1);
5245         check_spends!(node_txn[2], chan_1.3);
5246
5247         mine_transaction(&nodes[1], &node_txn[1]);
5248         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
5249
5250         // Check B's ChannelMonitor was able to generate the right spendable output descriptor
5251         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
5252         assert_eq!(spend_txn.len(), 1);
5253         assert_eq!(spend_txn[0].input.len(), 1);
5254         check_spends!(spend_txn[0], node_txn[1]);
5255 }
5256
5257 #[test]
5258 fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
5259         let mut chanmon_cfgs = create_chanmon_cfgs(2);
5260         chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
5261         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5262         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5263         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5264
5265         // Create some initial channels
5266         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
5267
5268         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
5269         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
5270         assert_eq!(revoked_local_txn[0].input.len(), 1);
5271         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
5272
5273         // The to-be-revoked commitment tx should have one HTLC and one to_remote output
5274         assert_eq!(revoked_local_txn[0].output.len(), 2);
5275
5276         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
5277
5278         // B will generate HTLC-Success from revoked commitment tx
5279         mine_transaction(&nodes[1], &revoked_local_txn[0]);
5280         check_closed_broadcast!(nodes[1], true);
5281         check_added_monitors!(nodes[1], 1);
5282         let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
5283
5284         assert_eq!(revoked_htlc_txn.len(), 2);
5285         assert_eq!(revoked_htlc_txn[0].input.len(), 1);
5286         assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5287         check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]);
5288
5289         // Check that the unspent (of two) outputs on revoked_local_txn[0] is a P2WPKH:
5290         let unspent_local_txn_output = revoked_htlc_txn[0].input[0].previous_output.vout as usize ^ 1;
5291         assert_eq!(revoked_local_txn[0].output[unspent_local_txn_output].script_pubkey.len(), 2 + 20); // P2WPKH
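             // (XOR-ing the vout with 1 selects the other of the commitment's two outputs; the 22-byte
             // P2WPKH script confirms it is the to_remote output paying A directly, rather than the HTLC
             // output which revoked_htlc_txn[0] spent.)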
5292
5293         // A will generate justice tx from B's revoked commitment/HTLC tx
5294         let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
5295         connect_block(&nodes[0], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] });
5296         check_closed_broadcast!(nodes[0], true);
5297         check_added_monitors!(nodes[0], 1);
5298
5299         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
5300         assert_eq!(node_txn.len(), 3); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-success, ChannelManager: local commitment tx
5301
5302         // The first transaction generated is bogus - it spends revoked_htlc_txn[0]'s output together
5303         // with the HTLC output of revoked_local_txn[0], even though the latter was already spent by
5304         // revoked_htlc_txn[0] itself. That's OK, we'll spend with valid transactions next...
5305         assert_eq!(node_txn[0].input.len(), 2);
5306         check_spends!(node_txn[0], revoked_local_txn[0], revoked_htlc_txn[0]);
5307         if node_txn[0].input[1].previous_output.txid == revoked_htlc_txn[0].txid() {
5308                 assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
5309         } else {
5310                 assert_eq!(node_txn[0].input[0].previous_output.txid, revoked_htlc_txn[0].txid());
5311                 assert_eq!(node_txn[0].input[1].previous_output, revoked_htlc_txn[0].input[0].previous_output);
5312         }
5313
5314         assert_eq!(node_txn[1].input.len(), 1);
5315         check_spends!(node_txn[1], revoked_htlc_txn[0]);
5316
5317         check_spends!(node_txn[2], chan_1.3);
5318
5319         mine_transaction(&nodes[0], &node_txn[1]);
5320         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
5321
5322         // Note that nodes[0]'s tx_broadcaster is still locked, so if we get here the channelmonitor
5323         // didn't try to generate any new transactions.
5324
5325         // Check A's ChannelMonitor was able to generate the right spendable output descriptor
5326         let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
5327         assert_eq!(spend_txn.len(), 3);
5328         assert_eq!(spend_txn[0].input.len(), 1);
5329         check_spends!(spend_txn[0], revoked_local_txn[0]); // spending to_remote output from revoked local tx
5330         assert_ne!(spend_txn[0].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
5331         check_spends!(spend_txn[1], node_txn[1]); // spending justice tx output on the htlc success tx
5332         check_spends!(spend_txn[2], revoked_local_txn[0], node_txn[1]); // Both outputs
5333 }
5334
5335 #[test]
5336 fn test_onchain_to_onchain_claim() {
5337         // Test that, in case of channel closure, we detect the state of the HTLC output and claim the
5338         // HTLC on the downstream peer's remote commitment tx.
5339         // First, have C claim an HTLC against its own latest commitment transaction.
5340         // Then, broadcast these to B, which should update the monitor downstream on the A<->B
5341         // channel.
5342         // Finally, check that B will claim the HTLC output if A's latest commitment transaction
5343         // gets broadcast.
5344
5345         let chanmon_cfgs = create_chanmon_cfgs(3);
5346         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
5347         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
5348         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
5349
5350         // Create some initial channels
5351         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
5352         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
5353
5354         // Ensure all nodes are at the same height
5355         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
5356         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
5357         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
5358         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
5359
5360         // Rebalance the network a bit by relaying one payment through all the channels ...
5361         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
5362         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
5363
5364         let (payment_preimage, _payment_hash, _payment_secret) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
5365         let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
5366         check_spends!(commitment_tx[0], chan_2.3);
5367         nodes[2].node.claim_funds(payment_preimage);
5368         check_added_monitors!(nodes[2], 1);
5369         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
5370         assert!(updates.update_add_htlcs.is_empty());
5371         assert!(updates.update_fail_htlcs.is_empty());
5372         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
5373         assert!(updates.update_fail_malformed_htlcs.is_empty());
5374
5375         mine_transaction(&nodes[2], &commitment_tx[0]);
5376         check_closed_broadcast!(nodes[2], true);
5377         check_added_monitors!(nodes[2], 1);
5378
5379         let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Success tx), ChannelMonitor : 1 (HTLC-Success tx)
5380         assert_eq!(c_txn.len(), 3);
5381         assert_eq!(c_txn[0], c_txn[2]);
5382         assert_eq!(commitment_tx[0], c_txn[1]);
5383         check_spends!(c_txn[1], chan_2.3);
5384         check_spends!(c_txn[2], c_txn[1]);
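             // (The last witness element of a commitment-tx input is the 2-of-2 funding redeemscript,
             // 71 bytes, while an HTLC-Success input's last element is the accepted-HTLC script, hence
             // the two different length checks below.)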
5385         assert_eq!(c_txn[1].input[0].witness.clone().last().unwrap().len(), 71);
5386         assert_eq!(c_txn[2].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5387         assert!(c_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
5388         assert_eq!(c_txn[0].lock_time, 0); // Success tx
5389
5390         // We broadcast C's commitment tx and HTLC-Success on B's chain; B should be able to extract the preimage and update the downstream monitor on the A<->B channel
5391         let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
5392         connect_block(&nodes[1], &Block { header, txdata: vec![c_txn[1].clone(), c_txn[2].clone()]});
5393         check_added_monitors!(nodes[1], 1);
5394         expect_payment_forwarded!(nodes[1], Some(1000), true);
5395         {
5396                 let mut b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
5397                 // ChannelManager: local commitment tx (no ChannelMonitor claim has been broadcast at this point)
5398                 assert_eq!(b_txn.len(), 1);
5399                 check_spends!(b_txn[0], chan_2.3); // B local commitment tx, issued by ChannelManager
5400                 b_txn.clear();
5401         }
5402         let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
5403         assert_eq!(msg_events.len(), 3);
5404         check_added_monitors!(nodes[1], 1);
5405         match msg_events[0] {
5406                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
5407                 _ => panic!("Unexpected event"),
5408         }
5409         match msg_events[1] {
5410                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id: _ } => {},
5411                 _ => panic!("Unexpected event"),
5412         }
5413         match msg_events[2] {
5414                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
5415                         assert!(update_add_htlcs.is_empty());
5416                         assert!(update_fail_htlcs.is_empty());
5417                         assert_eq!(update_fulfill_htlcs.len(), 1);
5418                         assert!(update_fail_malformed_htlcs.is_empty());
5419                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
5420                 },
5421                 _ => panic!("Unexpected event"),
5422         };
5423         // Broadcast A's commitment tx on B's chain to see if we are able to claim the inbound HTLC with our HTLC-Success tx
5424         let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
5425         mine_transaction(&nodes[1], &commitment_tx[0]);
5426         let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
5427         // ChannelMonitor: HTLC-Success tx, ChannelManager: local commitment tx + HTLC-Success tx
5428         assert_eq!(b_txn.len(), 3);
5429         check_spends!(b_txn[1], chan_1.3);
5430         check_spends!(b_txn[2], b_txn[1]);
5431         check_spends!(b_txn[0], commitment_tx[0]);
5432         assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
5433         assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
5434         assert_eq!(b_txn[0].lock_time, 0); // Success tx
5435
5436         check_closed_broadcast!(nodes[1], true);
5437         check_added_monitors!(nodes[1], 1);
5438 }
5439
5440 #[test]
5441 fn test_duplicate_payment_hash_one_failure_one_success() {
5442         // Topology : A --> B --> C --> D
5443         // We route 2 payments with the same hash between B and C; one will be timed out, the other successfully claimed.
5444         // Note that because C will refuse to generate two payment secrets for the same payment hash,
5445         // we forward one of the payments onwards to D.
5446         let chanmon_cfgs = create_chanmon_cfgs(4);
5447         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
5448         // When this test was written, the default base fee floated based on the HTLC count.
5449         // It is now fixed, so we simply set the fee to the expected value here.
5450         let mut config = test_default_channel_config();
5451         config.channel_options.forwarding_fee_base_msat = 196;
5452         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs,
5453                 &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
5454         let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
5455
5456         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
5457         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
5458         create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known());
5459
5460         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
5461         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
5462         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
5463         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
5464         connect_blocks(&nodes[3], node_max_height - nodes[3].best_block_info().1);
5465
5466         let (our_payment_preimage, duplicate_payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 900000);
5467
5468         let payment_secret = nodes[3].node.create_inbound_payment_for_hash(duplicate_payment_hash, None, 7200, 0).unwrap();
5469         // We reduce the final CLTV here by a somewhat arbitrary constant to keep it under the one-byte
5470         // script push size limit so that the below script length checks match
5471         // ACCEPTED_HTLC_SCRIPT_WEIGHT.
5472         let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph.read().unwrap(),
5473                 &nodes[3].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 900000, TEST_FINAL_CLTV - 40, nodes[0].logger).unwrap();
5474         send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[2], &nodes[3]]], 900000, duplicate_payment_hash, payment_secret);
5475
5476         let commitment_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
5477         assert_eq!(commitment_txn[0].input.len(), 1);
5478         check_spends!(commitment_txn[0], chan_2.3);
5479
5480         mine_transaction(&nodes[1], &commitment_txn[0]);
5481         check_closed_broadcast!(nodes[1], true);
5482         check_added_monitors!(nodes[1], 1);
5483         connect_blocks(&nodes[1], TEST_FINAL_CLTV - 40 + MIN_CLTV_EXPIRY_DELTA as u32 - 1); // Confirm blocks until the HTLC expires
5484
5485         let htlc_timeout_tx;
5486         { // Extract one of the two HTLC-Timeout transactions
5487                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
5488                 // ChannelMonitor: timeout tx * 3, ChannelManager: local commitment tx
5489                 assert_eq!(node_txn.len(), 4);
5490                 check_spends!(node_txn[0], chan_2.3);
5491
5492                 check_spends!(node_txn[1], commitment_txn[0]);
5493                 assert_eq!(node_txn[1].input.len(), 1);
5494                 check_spends!(node_txn[2], commitment_txn[0]);
5495                 assert_eq!(node_txn[2].input.len(), 1);
5496                 assert_eq!(node_txn[1].input[0].previous_output, node_txn[2].input[0].previous_output);
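                     // (node_txn[1] and node_txn[2] are conflicting claims on the same HTLC outpoint,
                     // likely a regenerated version of the same claim since the two same-hash HTLCs expire
                     // at different heights, while node_txn[3] spends the other HTLC output.)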
5497                 check_spends!(node_txn[3], commitment_txn[0]);
5498                 assert_ne!(node_txn[1].input[0].previous_output, node_txn[3].input[0].previous_output);
5499
5500                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5501                 assert_eq!(node_txn[2].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5502                 assert_eq!(node_txn[3].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5503                 htlc_timeout_tx = node_txn[1].clone();
5504         }
5505
5506         nodes[2].node.claim_funds(our_payment_preimage);
5507         mine_transaction(&nodes[2], &commitment_txn[0]);
5508         check_added_monitors!(nodes[2], 2);
5509         let events = nodes[2].node.get_and_clear_pending_msg_events();
5510         match events[0] {
5511                 MessageSendEvent::UpdateHTLCs { .. } => {},
5512                 _ => panic!("Unexpected event"),
5513         }
5514         match events[1] {
5515                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
5516                 _ => panic!("Unexpected event"),
5517         }
5518         let htlc_success_txn: Vec<_> = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
5519         assert_eq!(htlc_success_txn.len(), 5); // ChannelMonitor: HTLC-Success txn (*2 due to 2-HTLC outputs), ChannelManager: local commitment tx + HTLC-Success txn (*2 due to 2-HTLC outputs)
5520         check_spends!(htlc_success_txn[0], commitment_txn[0]);
5521         check_spends!(htlc_success_txn[1], commitment_txn[0]);
5522         assert_eq!(htlc_success_txn[0].input.len(), 1);
5523         assert_eq!(htlc_success_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5524         assert_eq!(htlc_success_txn[1].input.len(), 1);
5525         assert_eq!(htlc_success_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5526         assert_ne!(htlc_success_txn[0].input[0].previous_output, htlc_success_txn[1].input[0].previous_output);
5527         assert_eq!(htlc_success_txn[2], commitment_txn[0]);
5528         assert_eq!(htlc_success_txn[3], htlc_success_txn[0]);
5529         assert_eq!(htlc_success_txn[4], htlc_success_txn[1]);
5530         assert_ne!(htlc_success_txn[0].input[0].previous_output, htlc_timeout_tx.input[0].previous_output);
5531
5532         mine_transaction(&nodes[1], &htlc_timeout_tx);
5533         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
5534         expect_pending_htlcs_forwardable!(nodes[1]);
5535         let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5536         assert!(htlc_updates.update_add_htlcs.is_empty());
5537         assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
5538         let first_htlc_id = htlc_updates.update_fail_htlcs[0].htlc_id;
5539         assert!(htlc_updates.update_fulfill_htlcs.is_empty());
5540         assert!(htlc_updates.update_fail_malformed_htlcs.is_empty());
5541         check_added_monitors!(nodes[1], 1);
5542
5543         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
5544         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
5545         {
5546                 commitment_signed_dance!(nodes[0], nodes[1], &htlc_updates.commitment_signed, false, true);
5547                 expect_payment_failure_chan_update!(nodes[0], chan_2.0.contents.short_channel_id, true);
5548         }
5549         expect_payment_failed!(nodes[0], duplicate_payment_hash, false);
5550
5551         // Solve the 2nd HTLC by broadcasting C's HTLC-Success tx on B's chain.
5552         // Note that the fee paid is effectively double, as the HTLC value (including the nodes[1] fee
5553         // and nodes[2] fee) is rounded down and then claimed in full.
5554         mine_transaction(&nodes[1], &htlc_success_txn[0]);
5555         expect_payment_forwarded!(nodes[1], Some(196*2), true);
5556         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5557         assert!(updates.update_add_htlcs.is_empty());
5558         assert!(updates.update_fail_htlcs.is_empty());
5559         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
5560         assert_ne!(updates.update_fulfill_htlcs[0].htlc_id, first_htlc_id);
5561         assert!(updates.update_fail_malformed_htlcs.is_empty());
5562         check_added_monitors!(nodes[1], 1);
5563
5564         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
5565         commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false);
5566
5567         let events = nodes[0].node.get_and_clear_pending_events();
5568         match events[0] {
5569                 Event::PaymentSent { ref payment_preimage } => {
5570                         assert_eq!(*payment_preimage, our_payment_preimage);
5571                 }
5572                 _ => panic!("Unexpected event"),
5573         }
5574 }
5575
5576 #[test]
5577 fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
5578         let chanmon_cfgs = create_chanmon_cfgs(2);
5579         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5580         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5581         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5582
5583         // Create some initial channels
5584         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
5585
5586         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000).0;
5587         let local_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
5588         assert_eq!(local_txn.len(), 1);
5589         assert_eq!(local_txn[0].input.len(), 1);
5590         check_spends!(local_txn[0], chan_1.3);
5591
5592         // Give B knowledge of the preimage so it can generate a local HTLC-Success tx
5593         nodes[1].node.claim_funds(payment_preimage);
5594         check_added_monitors!(nodes[1], 1);
5595         mine_transaction(&nodes[1], &local_txn[0]);
5596         check_added_monitors!(nodes[1], 1);
5597         let events = nodes[1].node.get_and_clear_pending_msg_events();
5598         match events[0] {
5599                 MessageSendEvent::UpdateHTLCs { .. } => {},
5600                 _ => panic!("Unexpected event"),
5601         }
5602         match events[1] {
5603                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
5604                 _ => panic!("Unexpected event"),
5605         }
5606         let node_tx = {
5607                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
5608                 assert_eq!(node_txn.len(), 3);
5609                 assert_eq!(node_txn[0], node_txn[2]);
5610                 assert_eq!(node_txn[1], local_txn[0]);
5611                 assert_eq!(node_txn[0].input.len(), 1);
5612                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5613                 check_spends!(node_txn[0], local_txn[0]);
5614                 node_txn[0].clone()
5615         };
5616
5617         mine_transaction(&nodes[1], &node_tx);
5618         connect_blocks(&nodes[1], BREAKDOWN_TIMEOUT as u32 - 1);
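             // (The HTLC-Success output is CSV-locked for to_self_delay blocks - BREAKDOWN_TIMEOUT in
             // these tests - so the sweep must mature first and sets its input nSequence to that delay,
             // as checked below.)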
5619
5620         // Verify that B is able to spend its own HTLC-Success tx thanks to the spendable output event given back by its ChannelMonitor
5621         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
5622         assert_eq!(spend_txn.len(), 1);
5623         assert_eq!(spend_txn[0].input.len(), 1);
5624         check_spends!(spend_txn[0], node_tx);
5625         assert_eq!(spend_txn[0].input[0].sequence, BREAKDOWN_TIMEOUT as u32);
5626 }
5627
5628 fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, announce_latest: bool) {
5629         // Test that we fail backwards the full set of HTLCs we need to when remote broadcasts an
5630         // unrevoked commitment transaction.
5631         // This includes HTLCs which were below the dust threshold as well as HTLCs which were awaiting
5632         // a remote RAA before they could be failed backwards (and combinations thereof).
5633         // We also test duplicate-hash HTLCs by adding two nodes on each side of the target nodes which
5634         // use the same payment hashes.
5635         // Thus, we use a six-node network:
5636         //
5637         // A \         / E
5638         //    - C - D -
5639         // B /         \ F
5640         // And test where C fails back to A/B when D announces its latest commitment transaction
5641         let chanmon_cfgs = create_chanmon_cfgs(6);
5642         let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
5643         // When this test was written, the default base fee floated based on the HTLC count.
5644         // It is now fixed, so we simply set the fee to the expected value here.
5645         let mut config = test_default_channel_config();
5646         config.channel_options.forwarding_fee_base_msat = 196;
5647         let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs,
5648                 &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
5649         let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
5650         let logger = test_utils::TestLogger::new();
5651
5652         create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known());
5653         create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
5654         let chan = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known());
5655         create_announced_chan_between_nodes(&nodes, 3, 4, InitFeatures::known(), InitFeatures::known());
5656         create_announced_chan_between_nodes(&nodes, 3, 5, InitFeatures::known(), InitFeatures::known());
5657
5658         // Rebalance and check output sanity...
5659         send_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 500000);
5660         send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000);
5661         assert_eq!(get_local_commitment_txn!(nodes[3], chan.2)[0].output.len(), 2);
5662
5663         let ds_dust_limit = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis;
5664         // 0th HTLC:
5665         let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
5666         // 1st HTLC:
5667         let (_, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
5668         let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
5669         let our_node_id = &nodes[1].node.get_our_node_id();
5670         let route = get_route(our_node_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[5].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), ds_dust_limit*1000, TEST_FINAL_CLTV, &logger).unwrap();
5671         // 2nd HTLC:
5672         send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_1, nodes[5].node.create_inbound_payment_for_hash(payment_hash_1, None, 7200, 0).unwrap()); // not added < dust limit + HTLC tx fee
5673         // 3rd HTLC:
5674         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_2, nodes[5].node.create_inbound_payment_for_hash(payment_hash_2, None, 7200, 0).unwrap()); // not added < dust limit + HTLC tx fee
5675         // 4th HTLC:
5676         let (_, payment_hash_3, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
5677         // 5th HTLC:
5678         let (_, payment_hash_4, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
5679         let route = get_route(our_node_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[5].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
5680         // 6th HTLC:
5681         send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_3, nodes[5].node.create_inbound_payment_for_hash(payment_hash_3, None, 7200, 0).unwrap());
5682         // 7th HTLC:
5683         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_4, nodes[5].node.create_inbound_payment_for_hash(payment_hash_4, None, 7200, 0).unwrap());
5684
5685         // 8th HTLC:
5686         let (_, payment_hash_5, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
5687         // 9th HTLC:
5688         let route = get_route(our_node_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[5].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), ds_dust_limit*1000, TEST_FINAL_CLTV, &logger).unwrap();
5689         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_5, nodes[5].node.create_inbound_payment_for_hash(payment_hash_5, None, 7200, 0).unwrap()); // not added < dust limit + HTLC tx fee
5690
5691         // 10th HTLC:
5692         let (_, payment_hash_6, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
5693         // 11th HTLC:
5694         let route = get_route(our_node_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[5].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
5695         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_6, nodes[5].node.create_inbound_payment_for_hash(payment_hash_6, None, 7200, 0).unwrap());
5696
5697         // Double-check that six of the new HTLCs were added
5698         // We now have six HTLCs pending over the dust limit and six HTLCs under the dust limit (ie,
5699         // with to_local and to_remote outputs, 8 outputs and 6 HTLCs not included).
5700         assert_eq!(get_local_commitment_txn!(nodes[3], chan.2).len(), 1);
5701         assert_eq!(get_local_commitment_txn!(nodes[3], chan.2)[0].output.len(), 8);
5702
5703         // Now fail back three of the over-dust-limit and three of the under-dust-limit payments in one go.
5704         // Fail 0th below-dust, 4th above-dust, 8th above-dust, 10th below-dust HTLCs
5705         assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_1));
5706         assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_3));
5707         assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_5));
5708         assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_6));
5709         check_added_monitors!(nodes[4], 0);
5710         expect_pending_htlcs_forwardable!(nodes[4]);
5711         check_added_monitors!(nodes[4], 1);
5712
5713         let four_removes = get_htlc_update_msgs!(nodes[4], nodes[3].node.get_our_node_id());
5714         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[0]);
5715         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[1]);
5716         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[2]);
5717         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[3]);
5718         commitment_signed_dance!(nodes[3], nodes[4], four_removes.commitment_signed, false);
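             // (commitment_signed_dance! drives the commitment_signed / revoke_and_ack exchange to
             // completion in both directions, so the removals above are irrevocably committed on the
             // D<->E channel.)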
5719
5720         // Fail 3rd below-dust and 7th above-dust HTLCs
5721         assert!(nodes[5].node.fail_htlc_backwards(&payment_hash_2));
5722         assert!(nodes[5].node.fail_htlc_backwards(&payment_hash_4));
5723         check_added_monitors!(nodes[5], 0);
5724         expect_pending_htlcs_forwardable!(nodes[5]);
5725         check_added_monitors!(nodes[5], 1);
5726
5727         let two_removes = get_htlc_update_msgs!(nodes[5], nodes[3].node.get_our_node_id());
5728         nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[0]);
5729         nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[1]);
5730         commitment_signed_dance!(nodes[3], nodes[5], two_removes.commitment_signed, false);
5731
5732         let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan.2);
5733
5734         expect_pending_htlcs_forwardable!(nodes[3]);
5735         check_added_monitors!(nodes[3], 1);
5736         let six_removes = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
5737         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[0]);
5738         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[1]);
5739         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[2]);
5740         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[3]);
5741         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[4]);
5742         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[5]);
5743         if deliver_last_raa {
5744                 commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false);
5745         } else {
5746                 let _cs_last_raa = commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false, true, false, true);
5747         }
5748
5749         // D's latest commitment transaction now contains 1st + 2nd + 9th HTLCs (implicitly, they're
5750         // below the dust limit) and the 5th + 6th + 11th HTLCs. It has failed back the 0th, 3rd, 4th,
5751         // 7th, 8th, and 10th, but as we haven't yet delivered the final RAA to C, the fails haven't
5752         // propagated back to A/B yet (and D has two unrevoked commitment transactions).
5753         //
5754         // We now broadcast the latest commitment transaction, which *should* result in failures for
5755         // the 0th, 1st, 2nd, 3rd, 4th, 7th, 8th, 9th, and 10th HTLCs, ie all the below-dust HTLCs and
5756         // the non-broadcast above-dust HTLCs.
5757         //
5758         // Alternatively, we may broadcast the previous commitment transaction, which should only
5759         // result in failures for the below-dust HTLCs, ie the 0th, 1st, 2nd, 3rd, 9th, and 10th HTLCs.
5760         let ds_last_commitment_tx = get_local_commitment_txn!(nodes[3], chan.2);
5761
5762         if announce_latest {
5763                 mine_transaction(&nodes[2], &ds_last_commitment_tx[0]);
5764         } else {
5765                 mine_transaction(&nodes[2], &ds_prev_commitment_tx[0]);
5766         }
5767         connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
5768         check_closed_broadcast!(nodes[2], true);
5769         expect_pending_htlcs_forwardable!(nodes[2]);
5770         check_added_monitors!(nodes[2], 3);
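             // (Presumably one monitor update for the force-closed C<->D channel and one each for the
             // A<->C and B<->C channels as the HTLCs are failed back across them.)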
5771
5772         let cs_msgs = nodes[2].node.get_and_clear_pending_msg_events();
5773         assert_eq!(cs_msgs.len(), 2);
5774         let mut a_done = false;
5775         for msg in cs_msgs {
5776                 match msg {
5777                         MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
5778                                 // Both under-dust HTLCs and the one above-dust HTLC that we had already failed
5779                                 // should be failed-backwards here.
5780                                 let target = if *node_id == nodes[0].node.get_our_node_id() {
5781                                         // If announce_latest, expect 0th, 1st, 4th, 8th, 10th HTLCs, else only 0th, 1st, 10th below-dust HTLCs
5782                                         for htlc in &updates.update_fail_htlcs {
5783                                                 assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 6 || if announce_latest { htlc.htlc_id == 3 || htlc.htlc_id == 5 } else { false });
5784                                         }
5785                                         assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 5 } else { 3 });
5786                                         assert!(!a_done);
5787                                         a_done = true;
5788                                         &nodes[0]
5789                                 } else {
5790                                         // If announce_latest, expect 2nd, 3rd, 7th, 9th HTLCs, else only 2nd, 3rd, 9th below-dust HTLCs
5791                                         for htlc in &updates.update_fail_htlcs {
5792                                                 assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 5 || if announce_latest { htlc.htlc_id == 4 } else { false });
5793                                         }
5794                                         assert_eq!(*node_id, nodes[1].node.get_our_node_id());
5795                                         assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 4 } else { 3 });
5796                                         &nodes[1]
5797                                 };
5798                                 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
5799                                 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[1]);
5800                                 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[2]);
5801                                 if announce_latest {
5802                                         target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[3]);
5803                                         if *node_id == nodes[0].node.get_our_node_id() {
5804                                                 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[4]);
5805                                         }
5806                                 }
5807                                 commitment_signed_dance!(target, nodes[2], updates.commitment_signed, false, true);
5808                         },
5809                         _ => panic!("Unexpected event"),
5810                 }
5811         }
5812
5813         let as_events = nodes[0].node.get_and_clear_pending_events();
5814         assert_eq!(as_events.len(), if announce_latest { 5 } else { 3 });
5815         let mut as_failds = HashSet::new();
5816         for event in as_events.iter() {
5817                 if let &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, .. } = event {
5818                         assert!(as_failds.insert(*payment_hash));
5819                         if *payment_hash != payment_hash_2 {
5820                                 assert_eq!(*rejected_by_dest, deliver_last_raa);
5821                         } else {
5822                                 assert!(!rejected_by_dest);
5823                         }
5824                 } else { panic!("Unexpected event"); }
5825         }
5826         assert!(as_failds.contains(&payment_hash_1));
5827         assert!(as_failds.contains(&payment_hash_2));
5828         if announce_latest {
5829                 assert!(as_failds.contains(&payment_hash_3));
5830                 assert!(as_failds.contains(&payment_hash_5));
5831         }
5832         assert!(as_failds.contains(&payment_hash_6));
5833
5834         let bs_events = nodes[1].node.get_and_clear_pending_events();
5835         assert_eq!(bs_events.len(), if announce_latest { 4 } else { 3 });
5836         let mut bs_failds = HashSet::new();
5837         for event in bs_events.iter() {
5838                 if let &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, .. } = event {
5839                         assert!(bs_failds.insert(*payment_hash));
5840                         if *payment_hash != payment_hash_1 && *payment_hash != payment_hash_5 {
5841                                 assert_eq!(*rejected_by_dest, deliver_last_raa);
5842                         } else {
5843                                 assert!(!rejected_by_dest);
5844                         }
5845                 } else { panic!("Unexpected event"); }
5846         }
5847         assert!(bs_failds.contains(&payment_hash_1));
5848         assert!(bs_failds.contains(&payment_hash_2));
5849         if announce_latest {
5850                 assert!(bs_failds.contains(&payment_hash_4));
5851         }
5852         assert!(bs_failds.contains(&payment_hash_5));
5853
5854         // For each HTLC which was not failed-back by normal process (ie deliver_last_raa), we should
5855         // get a PaymentFailureNetworkUpdate. A should have gotten 4 HTLCs which were failed-back due
5856         // to unknown-preimage-etc, B should have gotten 2. Thus, in the
5857         // announce_latest && deliver_last_raa case, we should have 5-4=1 and 4-2=2
5858         // PaymentFailureNetworkUpdates.
5859         let as_msg_events = nodes[0].node.get_and_clear_pending_msg_events();
5860         assert_eq!(as_msg_events.len(), if deliver_last_raa { 1 } else if !announce_latest { 3 } else { 5 });
5861         let bs_msg_events = nodes[1].node.get_and_clear_pending_msg_events();
5862         assert_eq!(bs_msg_events.len(), if deliver_last_raa { 2 } else if !announce_latest { 3 } else { 4 });
5863         for event in as_msg_events.iter().chain(bs_msg_events.iter()) {
5864                 match event {
5865                         &MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
5866                         _ => panic!("Unexpected event"),
5867                 }
5868         }
5869 }
5870
5871 #[test]
5872 fn test_fail_backwards_latest_remote_announce_a() {
5873         do_test_fail_backwards_unrevoked_remote_announce(false, true);
5874 }
5875
5876 #[test]
5877 fn test_fail_backwards_latest_remote_announce_b() {
5878         do_test_fail_backwards_unrevoked_remote_announce(true, true);
5879 }
5880
5881 #[test]
5882 fn test_fail_backwards_previous_remote_announce() {
5883         do_test_fail_backwards_unrevoked_remote_announce(false, false);
5884         // Note that true, true doesn't make sense as it implies we announce a revoked state, which is
5885         // tested for in test_commitment_revoked_fail_backward_exhaustive()
5886 }
5887
5888 #[test]
5889 fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
5890         let chanmon_cfgs = create_chanmon_cfgs(2);
5891         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5892         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5893         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5894
5895         // Create some initial channels
5896         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
5897
5898         let (_, our_payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000);
5899         let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
5900         assert_eq!(local_txn[0].input.len(), 1);
5901         check_spends!(local_txn[0], chan_1.3);
5902
5903         // Time out the HTLC on A's chain so that A can generate an HTLC-Timeout tx
5904         mine_transaction(&nodes[0], &local_txn[0]);
5905         check_closed_broadcast!(nodes[0], true);
5906         check_added_monitors!(nodes[0], 1);
5907         connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
5908
5909         let htlc_timeout = {
5910                 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
5911                 assert_eq!(node_txn.len(), 2);
5912                 check_spends!(node_txn[0], chan_1.3);
5913                 assert_eq!(node_txn[1].input.len(), 1);
5914                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
5915                 check_spends!(node_txn[1], local_txn[0]);
5916                 node_txn[1].clone()
5917         };
5918
5919         mine_transaction(&nodes[0], &htlc_timeout);
5920         connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
5921         expect_payment_failed!(nodes[0], our_payment_hash, true);
5922
5923         // Verify that A is able to spend its own HTLC-Timeout tx thanks to the spendable output event given back by its ChannelMonitor
5924         let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
5925         assert_eq!(spend_txn.len(), 3);
5926         check_spends!(spend_txn[0], local_txn[0]);
5927         assert_eq!(spend_txn[1].input.len(), 1);
5928         check_spends!(spend_txn[1], htlc_timeout);
5929         assert_eq!(spend_txn[1].input[0].sequence, BREAKDOWN_TIMEOUT as u32);
5930         assert_eq!(spend_txn[2].input.len(), 2);
5931         check_spends!(spend_txn[2], local_txn[0], htlc_timeout);
5932         assert!(spend_txn[2].input[0].sequence == BREAKDOWN_TIMEOUT as u32 ||
5933                 spend_txn[2].input[1].sequence == BREAKDOWN_TIMEOUT as u32);
5934 }
5935
5936 #[test]
5937 fn test_key_derivation_params() {
5938         // This test is a copy of test_dynamic_spendable_outputs_local_htlc_timeout_tx, with
5939         // a key manager rotation to test that key_derivation_params returned in DynamicOutputP2WSH
5940         // lets us re-derive the channel key set and then derive a delayed_payment_key.
5941
5942         let chanmon_cfgs = create_chanmon_cfgs(3);
5943
5944         // We manually create the node configuration to back up the seed.
5945         let seed = [42; 32];
5946         let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
5947         let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &chanmon_cfgs[0].persister, &keys_manager);
5948         let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, chain_monitor, keys_manager: &keys_manager, node_seed: seed, features: InitFeatures::known() };
5949         let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
5950         node_cfgs.remove(0);
5951         node_cfgs.insert(0, node);
5952
5953         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
5954         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
5955
5956         // Create some initial channels
5957         // Create a dummy channel to advance index by one and thus test re-derivation correctness
5958         // for node 0
5959         let chan_0 = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known());
5960         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
5961         assert_ne!(chan_0.3.output[0].script_pubkey, chan_1.3.output[0].script_pubkey);
5962
5963         // Ensure all nodes are at the same height
5964         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
5965         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
5966         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
5967         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
5968
5969         let (_, our_payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000);
5970         let local_txn_0 = get_local_commitment_txn!(nodes[0], chan_0.2);
5971         let local_txn_1 = get_local_commitment_txn!(nodes[0], chan_1.2);
5972         assert_eq!(local_txn_1[0].input.len(), 1);
5973         check_spends!(local_txn_1[0], chan_1.3);
5974
5975         // We check that the funding pubkeys are unique
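             // (witness[3] is the 2-of-2 funding redeemscript: OP_2 <33-byte pubkey> <33-byte pubkey>
             // OP_2 OP_CHECKMULTISIG, so bytes [2..35] and [36..69] are the two funding pubkeys.)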
5976         let (from_0_funding_key_0, from_0_funding_key_1) = (PublicKey::from_slice(&local_txn_0[0].input[0].witness[3][2..35]), PublicKey::from_slice(&local_txn_0[0].input[0].witness[3][36..69]));
5977         let (from_1_funding_key_0, from_1_funding_key_1) = (PublicKey::from_slice(&local_txn_1[0].input[0].witness[3][2..35]), PublicKey::from_slice(&local_txn_1[0].input[0].witness[3][36..69]));
5978         if from_0_funding_key_0 == from_1_funding_key_0
5979             || from_0_funding_key_0 == from_1_funding_key_1
5980             || from_0_funding_key_1 == from_1_funding_key_0
5981             || from_0_funding_key_1 == from_1_funding_key_1 {
5982                 panic!("Funding pubkeys aren't unique");
5983         }
5984
5985         // Time out the HTLC on A's chain so that A can generate an HTLC-Timeout tx
5986         mine_transaction(&nodes[0], &local_txn_1[0]);
5987         connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
5988         check_closed_broadcast!(nodes[0], true);
5989         check_added_monitors!(nodes[0], 1);
5990
5991         let htlc_timeout = {
5992                 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
5993                 assert_eq!(node_txn[1].input.len(), 1);
5994                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
5995                 check_spends!(node_txn[1], local_txn_1[0]);
5996                 node_txn[1].clone()
5997         };
5998
5999         mine_transaction(&nodes[0], &htlc_timeout);
6000         connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
6001         expect_payment_failed!(nodes[0], our_payment_hash, true);
6002
6003         // Verify that A is able to spend its own HTLC-Timeout tx thanks to the spendable output event given back by its ChannelMonitor
6004         let new_keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
6005         let spend_txn = check_spendable_outputs!(nodes[0], new_keys_manager);
6006         assert_eq!(spend_txn.len(), 3);
6007         check_spends!(spend_txn[0], local_txn_1[0]);
6008         assert_eq!(spend_txn[1].input.len(), 1);
6009         check_spends!(spend_txn[1], htlc_timeout);
6010         assert_eq!(spend_txn[1].input[0].sequence, BREAKDOWN_TIMEOUT as u32);
6011         assert_eq!(spend_txn[2].input.len(), 2);
6012         check_spends!(spend_txn[2], local_txn_1[0], htlc_timeout);
6013         assert!(spend_txn[2].input[0].sequence == BREAKDOWN_TIMEOUT as u32 ||
6014                 spend_txn[2].input[1].sequence == BREAKDOWN_TIMEOUT as u32);
6015 }
6016
6017 #[test]
6018 fn test_static_output_closing_tx() {
6019         let chanmon_cfgs = create_chanmon_cfgs(2);
6020         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6021         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6022         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6023
6024         let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
6025
6026         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
6027         let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
6028
6029         mine_transaction(&nodes[0], &closing_tx);
6030         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
6031
6032         let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
6033         assert_eq!(spend_txn.len(), 1);
6034         check_spends!(spend_txn[0], closing_tx);
6035
6036         mine_transaction(&nodes[1], &closing_tx);
6037         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
6038
6039         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
6040         assert_eq!(spend_txn.len(), 1);
6041         check_spends!(spend_txn[0], closing_tx);
6042 }
6043
6044 fn do_htlc_claim_local_commitment_only(use_dust: bool) {
6045         let chanmon_cfgs = create_chanmon_cfgs(2);
6046         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6047         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6048         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6049         let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
6050
6051         let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3000000 });
6052
6053         // Claim the payment, but don't deliver A's commitment_signed, resulting in the HTLC only being
6054         // present in B's local commitment transaction, but not in any of A's commitment transactions.
6055         assert!(nodes[1].node.claim_funds(our_payment_preimage));
6056         check_added_monitors!(nodes[1], 1);
6057
6058         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6059         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
6060         let events = nodes[0].node.get_and_clear_pending_events();
6061         assert_eq!(events.len(), 1);
6062         match events[0] {
6063                 Event::PaymentSent { payment_preimage } => {
6064                         assert_eq!(payment_preimage, our_payment_preimage);
6065                 },
6066                 _ => panic!("Unexpected event"),
6067         }
6068
6069         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed);
6070         check_added_monitors!(nodes[0], 1);
6071         let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6072         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0);
6073         check_added_monitors!(nodes[1], 1);
6074
6075         let starting_block = nodes[1].best_block_info();
6076         let mut block = Block {
6077                 header: BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
6078                 txdata: vec![],
6079         };
6080         for _ in starting_block.1 + 1..TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + starting_block.1 + 2 {
6081                 connect_block(&nodes[1], &block);
6082                 block.header.prev_blockhash = block.block_hash();
6083         }
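             // (We connect empty blocks until the HTLC's expiry is within CLTV_CLAIM_BUFFER blocks, at
             // which point B, knowing the preimage, goes on-chain; test_txn_broadcast then expects a
             // commitment tx plus an HTLC-Success claim unless the HTLC was dust.)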
6084         test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
6085         check_closed_broadcast!(nodes[1], true);
6086         check_added_monitors!(nodes[1], 1);
6087 }
6088
6089 fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
6090         let chanmon_cfgs = create_chanmon_cfgs(2);
6091         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6092         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6093         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6094         let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
6095         let logger = test_utils::TestLogger::new();
6096
6097         let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[1]);
6098         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6099         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), if use_dust { 50000 } else { 3000000 }, TEST_FINAL_CLTV, &logger).unwrap();
6100         nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
6101         check_added_monitors!(nodes[0], 1);
6102
6103         let _as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6104
6105         // As far as A is concerned, the HTLC is now present only in the latest remote commitment
6106         // transaction; however, it is not in A's latest local commitment, so we can just broadcast that
6107         // to "time out" the HTLC.
6108
6109         let starting_block = nodes[1].best_block_info();
6110         let mut header = BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
6111
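        // Connect blocks on nodes[0] until the HTLC's expiry plus LATENCY_GRACE_PERIOD_BLOCKS has
        // passed. Since the HTLC only appears in the counterparty's commitment transaction, nodes[0]
        // must force-close, broadcasting its HTLC-less local commitment to "time out" the HTLC.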
6112         for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + starting_block.1 + 2 {
6113                 connect_block(&nodes[0], &Block { header, txdata: Vec::new()});
6114                 header.prev_blockhash = header.block_hash();
6115         }
6116         test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
6117         check_closed_broadcast!(nodes[0], true);
6118         check_added_monitors!(nodes[0], 1);
6119 }
6120
6121 fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
6122         let chanmon_cfgs = create_chanmon_cfgs(3);
6123         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
6124         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
6125         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
6126         let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
6127
6128         // Fail the payment, but don't deliver A's final RAA, resulting in the HTLC only being present
6129         // in B's previous (unrevoked) commitment transaction, but none of A's commitment transactions.
6130         // Also optionally test that we *don't* fail the channel in case the commitment transaction was
6131         // actually revoked.
6132         let htlc_value = if use_dust { 50000 } else { 3000000 };
6133         let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
6134         assert!(nodes[1].node.fail_htlc_backwards(&our_payment_hash));
6135         expect_pending_htlcs_forwardable!(nodes[1]);
6136         check_added_monitors!(nodes[1], 1);
6137
6138         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6139         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]);
6140         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed);
6141         check_added_monitors!(nodes[0], 1);
6142         let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6143         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0);
6144         check_added_monitors!(nodes[1], 1);
6145         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.1);
6146         check_added_monitors!(nodes[1], 1);
6147         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
6148
6149         if check_revoke_no_close {
6150                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
6151                 check_added_monitors!(nodes[0], 1);
6152         }
6153
6154         let starting_block = nodes[1].best_block_info();
6155         let mut block = Block {
6156                 header: BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
6157                 txdata: vec![],
6158         };
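        // Connect blocks on nodes[0] well past the HTLC's expiry. If the HTLC is still present only in
        // an unrevoked counterparty commitment transaction, nodes[0] should force-close to time it out;
        // if that commitment was since revoked (check_revoke_no_close), the channel stays open and the
        // HTLC is simply failed back.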
6159         for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 2 {
6160                 connect_block(&nodes[0], &block);
6161                 block.header.prev_blockhash = block.block_hash();
6162         }
6163         if !check_revoke_no_close {
6164                 test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
6165                 check_closed_broadcast!(nodes[0], true);
6166                 check_added_monitors!(nodes[0], 1);
6167         } else {
6168                 expect_payment_failed!(nodes[0], our_payment_hash, true);
6169         }
6170 }
6171
6172 // Test that we close channels on-chain when broadcastable HTLCs reach their timeout window.
6173 // There are only a few cases to test here:
6174 //  * it's not really normative behavior, but we test that below-dust HTLCs "included" in
6175 //    broadcastable commitment transactions result in channel closure,
6176 //  * it's included in an unrevoked-but-previous remote commitment transaction,
6177 //  * it's included in the latest remote or local commitment transactions.
6178 // We test each of the three possible commitment transactions individually and use both dust and
6179 // non-dust HTLCs.
6180 // Note that we don't bother testing both outbound and inbound HTLC failures for each case, and we
6181 // assume they are handled the same across all six cases, as both outbound and inbound failures are
6182 // tested for at least one of the cases in other tests.
6183 #[test]
6184 fn htlc_claim_single_commitment_only_a() {
6185         do_htlc_claim_local_commitment_only(true);
6186         do_htlc_claim_local_commitment_only(false);
6187
6188         do_htlc_claim_current_remote_commitment_only(true);
6189         do_htlc_claim_current_remote_commitment_only(false);
6190 }
6191
6192 #[test]
6193 fn htlc_claim_single_commitment_only_b() {
6194         do_htlc_claim_previous_remote_commitment_only(true, false);
6195         do_htlc_claim_previous_remote_commitment_only(false, false);
6196         do_htlc_claim_previous_remote_commitment_only(true, true);
6197         do_htlc_claim_previous_remote_commitment_only(false, true);
6198 }
6199
6200 #[test]
6201 #[should_panic]
6202 fn bolt2_open_channel_sending_node_checks_part1() { //This test needs to be on its own as we are catching a panic
6203         let chanmon_cfgs = create_chanmon_cfgs(2);
6204         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6205         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6206         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6207         //Force duplicate channel ids
6208         for node in nodes.iter() {
6209                 *node.keys_manager.override_channel_id_priv.lock().unwrap() = Some([0; 32]);
6210         }
6211
6212         // BOLT #2 spec: Sending node must ensure temporary_channel_id is unique from any other channel ID with the same peer.
6213         let channel_value_satoshis=10000;
6214         let push_msat=10001;
6215         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).unwrap();
6216         let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
6217         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &node0_to_1_send_open_channel);
6218
6219         //Create a second channel with a channel_id collision
6220         assert!(nodes[0].node.create_channel(nodes[0].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).is_err());
6221 }
6222
6223 #[test]
6224 fn bolt2_open_channel_sending_node_checks_part2() {
6225         let chanmon_cfgs = create_chanmon_cfgs(2);
6226         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6227         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6228         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6229
6230         // BOLT #2 spec: Sending node must set funding_satoshis to less than 2^24 satoshis
6231         let channel_value_satoshis=1 << 24; // i.e. 2^24; note that `2^24` in Rust is XOR (= 26), not exponentiation
6232         let push_msat=10001;
6233         assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).is_err());
6234
6235         // BOLT #2 spec: Sending node must set push_msat to equal or less than 1000 * funding_satoshis
6236         let channel_value_satoshis=10000;
6237         // Test with push_msat one greater than 1000 * funding_satoshis.
6238         let push_msat=1000*channel_value_satoshis+1;
6239         assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).is_err());
6240
6241         // BOLT #2 spec: Sending node must set channel_reserve_satoshis greater than or equal to dust_limit_satoshis
6242         let channel_value_satoshis=10000;
6243         let push_msat=10001;
6244         assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).is_ok()); //Create a valid channel
6245         let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
6246         assert!(node0_to_1_send_open_channel.channel_reserve_satoshis>=node0_to_1_send_open_channel.dust_limit_satoshis);
6247
6248         // BOLT #2 spec: Sending node must set undefined bits in channel_flags to 0
6249         // Only the least-significant bit of channel_flags is currently defined, so channel_flags can only take one of two values: 0 or 1
6250         assert!(node0_to_1_send_open_channel.channel_flags<=1);
6251
6252         // BOLT #2 spec: Sending node should set to_self_delay sufficient to ensure the sender can irreversibly spend a commitment transaction output, in case of misbehaviour by the receiver.
6253         assert!(BREAKDOWN_TIMEOUT>0);
6254         assert!(node0_to_1_send_open_channel.to_self_delay==BREAKDOWN_TIMEOUT);
6255
6256         // BOLT #2 spec: Sending node must ensure the chain_hash value identifies the chain it wishes to open the channel within.
6257         let chain_hash=genesis_block(Network::Testnet).header.block_hash();
6258         assert_eq!(node0_to_1_send_open_channel.chain_hash,chain_hash);
6259
6260         // BOLT #2 spec: Sending node must set funding_pubkey, revocation_basepoint, htlc_basepoint, payment_basepoint, and delayed_payment_basepoint to valid DER-encoded, compressed, secp256k1 pubkeys.
6261         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.funding_pubkey.serialize()).is_ok());
6262         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.revocation_basepoint.serialize()).is_ok());
6263         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.htlc_basepoint.serialize()).is_ok());
6264         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.payment_point.serialize()).is_ok());
6265         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.delayed_payment_basepoint.serialize()).is_ok());
6266 }
6267
6268 #[test]
6269 fn bolt2_open_channel_sane_dust_limit() {
6270         let chanmon_cfgs = create_chanmon_cfgs(2);
6271         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6272         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6273         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6274
6275         let channel_value_satoshis=1000000;
6276         let push_msat=10001;
6277         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).unwrap();
6278         let mut node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
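        // Tweak the open_channel message so its dust_limit_satoshis is one above the implementation
        // maximum (660 sat, per the error message asserted below); nodes[1] should reject the open.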
6279         node0_to_1_send_open_channel.dust_limit_satoshis = 661;
6280         node0_to_1_send_open_channel.channel_reserve_satoshis = 100001;
6281
6282         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &node0_to_1_send_open_channel);
6283         let events = nodes[1].node.get_and_clear_pending_msg_events();
6284         let err_msg = match events[0] {
6285                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
6286                         msg.clone()
6287                 },
6288                 _ => panic!("Unexpected event"),
6289         };
6290         assert_eq!(err_msg.data, "dust_limit_satoshis (661) is greater than the implementation limit (660)");
6291 }
6292
6293 // Test that if we fail to send an HTLC that is being freed from the holding cell, and the HTLC
6294 // originated from our node, its failure is surfaced to the user. We trigger this failure to
6295 // free the HTLC by increasing our fee while the HTLC is in the holding cell such that the HTLC
6296 // is no longer affordable once it's freed.
6297 #[test]
6298 fn test_fail_holding_cell_htlc_upon_free() {
6299         let chanmon_cfgs = create_chanmon_cfgs(2);
6300         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6301         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6302         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6303         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
6304         let logger = test_utils::TestLogger::new();
6305
6306         // First nodes[0] generates an update_fee, setting the channel's
6307         // pending_update_fee.
6308         {
6309                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
6310                 *feerate_lock += 20;
6311         }
6312         nodes[0].node.timer_tick_occurred();
6313         check_added_monitors!(nodes[0], 1);
6314
6315         let events = nodes[0].node.get_and_clear_pending_msg_events();
6316         assert_eq!(events.len(), 1);
6317         let (update_msg, commitment_signed) = match events[0] {
6318                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
6319                         (update_fee.as_ref(), commitment_signed)
6320                 },
6321                 _ => panic!("Unexpected event"),
6322         };
6323
6324         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
6325
6326         let mut chan_stat = get_channel_value_stat!(nodes[0], chan.2);
6327         let channel_reserve = chan_stat.channel_reserve_msat;
6328         let feerate = get_feerate!(nodes[0], chan.2);
6329
6330         // The 2* and +1 in the commit tx fee calculation below account for the fee spike reserve.
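        // max_can_send below is nodes[0]'s spendable balance: the channel was opened with
        // 95_000_000 msat pushed to nodes[1] out of 100_000 sat, leaving 5_000_000 msat, from which
        // we subtract the reserve and the commitment tx fee. (The commit_tx_fee_msat test helper is
        // roughly (COMMITMENT_TX_BASE_WEIGHT + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate
        // / 1000, expressed in msat.)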
6331         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
6332         let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1);
6333         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6334         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], max_can_send, TEST_FINAL_CLTV, &logger).unwrap();
6335
6336         // Send a payment which passes reserve checks but gets stuck in the holding cell.
6337         nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
6338         chan_stat = get_channel_value_stat!(nodes[0], chan.2);
6339         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send);
6340
6341         // Flush the pending fee update.
6342         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
6343         let (as_revoke_and_ack, _) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6344         check_added_monitors!(nodes[1], 1);
6345         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack);
6346         check_added_monitors!(nodes[0], 1);
6347
6348         // Upon receipt of the RAA, there will be an attempt to resend the holding cell
6349         // HTLC, but now that the fee has been raised the payment will now fail, causing
6350         // us to surface its failure to the user.
6351         chan_stat = get_channel_value_stat!(nodes[0], chan.2);
6352         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
6353         nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 1 HTLC updates in channel {}", hex::encode(chan.2)), 1);
6354         let failure_log = format!("Failed to send HTLC with payment_hash {} due to Cannot send value that would put our balance under counterparty-announced channel reserve value ({}) in channel {}",
6355                 hex::encode(our_payment_hash.0), chan_stat.channel_reserve_msat, hex::encode(chan.2));
6356         nodes[0].logger.assert_log("lightning::ln::channel".to_string(), failure_log.to_string(), 1);
6357
6358         // Check that the payment failed to be sent out.
6359         let events = nodes[0].node.get_and_clear_pending_events();
6360         assert_eq!(events.len(), 1);
6361         match &events[0] {
6362                 &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref error_code, ref error_data } => {
6363                         assert_eq!(our_payment_hash.clone(), *payment_hash);
6364                         assert_eq!(*rejected_by_dest, false);
6365                         assert_eq!(*error_code, None);
6366                         assert_eq!(*error_data, None);
6367                 },
6368                 _ => panic!("Unexpected event"),
6369         }
6370 }
6371
6372 // Test that if multiple HTLCs are released from the holding cell and one is
6373 // valid but the other is no longer valid upon release, the valid HTLC can be
6374 // successfully completed while the other one fails as expected.
6375 #[test]
6376 fn test_free_and_fail_holding_cell_htlcs() {
6377         let chanmon_cfgs = create_chanmon_cfgs(2);
6378         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6379         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6380         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6381         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
6382         let logger = test_utils::TestLogger::new();
6383
6384         // First nodes[0] generates an update_fee, setting the channel's
6385         // pending_update_fee.
6386         {
6387                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
6388                 *feerate_lock += 200;
6389         }
6390         nodes[0].node.timer_tick_occurred();
6391         check_added_monitors!(nodes[0], 1);
6392
6393         let events = nodes[0].node.get_and_clear_pending_msg_events();
6394         assert_eq!(events.len(), 1);
6395         let (update_msg, commitment_signed) = match events[0] {
6396                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
6397                         (update_fee.as_ref(), commitment_signed)
6398                 },
6399                 _ => panic!("Unexpected event"),
6400         };
6401
6402         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
6403
6404         let mut chan_stat = get_channel_value_stat!(nodes[0], chan.2);
6405         let channel_reserve = chan_stat.channel_reserve_msat;
6406         let feerate = get_feerate!(nodes[0], chan.2);
6407
6408         // The 2* and +1 in the commit tx fee calculation below account for the fee spike reserve.
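        // amt_1 + amt_2 together consume nodes[0]'s full 5_000_000 msat balance (less the reserve and
        // the commitment tx fee for two pending HTLCs), so both pass reserve checks at the current
        // feerate, but amt_2 becomes unaffordable once the pending fee increase takes effect.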
6409         let (payment_preimage_1, payment_hash_1, payment_secret_1) = get_payment_preimage_hash!(nodes[1]);
6410         let amt_1 = 20000;
6411         let (_, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]);
6412         let amt_2 = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 2 + 1) - amt_1;
6413         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6414         let route_1 = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], amt_1, TEST_FINAL_CLTV, &logger).unwrap();
6415         let route_2 = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], amt_2, TEST_FINAL_CLTV, &logger).unwrap();
6416
6417         // Send 2 payments which pass reserve checks but get stuck in the holding cell.
6418         nodes[0].node.send_payment(&route_1, payment_hash_1, &Some(payment_secret_1)).unwrap();
6419         chan_stat = get_channel_value_stat!(nodes[0], chan.2);
6420         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1);
6421         nodes[0].node.send_payment(&route_2, payment_hash_2, &Some(payment_secret_2)).unwrap();
6422         chan_stat = get_channel_value_stat!(nodes[0], chan.2);
6423         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1 + amt_2);
6424
6425         // Flush the pending fee update.
6426         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
6427         let (revoke_and_ack, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6428         check_added_monitors!(nodes[1], 1);
6429         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_and_ack);
6430         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
6431         check_added_monitors!(nodes[0], 2);
6432
6433         // Upon receipt of the RAA, there will be an attempt to resend the holding cell HTLCs,
6434         // but now that the fee has been raised the second payment will now fail, causing us
6435         // to surface its failure to the user. The first payment should succeed.
6436         chan_stat = get_channel_value_stat!(nodes[0], chan.2);
6437         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
6438         nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 2 HTLC updates in channel {}", hex::encode(chan.2)), 1);
6439         let failure_log = format!("Failed to send HTLC with payment_hash {} due to Cannot send value that would put our balance under counterparty-announced channel reserve value ({}) in channel {}",
6440                 hex::encode(payment_hash_2.0), chan_stat.channel_reserve_msat, hex::encode(chan.2));
6441         nodes[0].logger.assert_log("lightning::ln::channel".to_string(), failure_log.to_string(), 1);
6442
6443         // Check that the second payment failed to be sent out.
6444         let events = nodes[0].node.get_and_clear_pending_events();
6445         assert_eq!(events.len(), 1);
6446         match &events[0] {
6447                 &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref error_code, ref error_data } => {
6448                         assert_eq!(payment_hash_2.clone(), *payment_hash);
6449                         assert_eq!(*rejected_by_dest, false);
6450                         assert_eq!(*error_code, None);
6451                         assert_eq!(*error_data, None);
6452                 },
6453                 _ => panic!("Unexpected event"),
6454         }
6455
6456         // Complete the first payment and the RAA from the fee update.
6457         let (payment_event, send_raa_event) = {
6458                 let mut msgs = nodes[0].node.get_and_clear_pending_msg_events();
6459                 assert_eq!(msgs.len(), 2);
6460                 (SendEvent::from_event(msgs.remove(0)), msgs.remove(0))
6461         };
6462         let raa = match send_raa_event {
6463                 MessageSendEvent::SendRevokeAndACK { msg, .. } => msg,
6464                 _ => panic!("Unexpected event"),
6465         };
6466         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
6467         check_added_monitors!(nodes[1], 1);
6468         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6469         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6470         let events = nodes[1].node.get_and_clear_pending_events();
6471         assert_eq!(events.len(), 1);
6472         match events[0] {
6473                 Event::PendingHTLCsForwardable { .. } => {},
6474                 _ => panic!("Unexpected event"),
6475         }
6476         nodes[1].node.process_pending_htlc_forwards();
6477         let events = nodes[1].node.get_and_clear_pending_events();
6478         assert_eq!(events.len(), 1);
6479         match events[0] {
6480                 Event::PaymentReceived { .. } => {},
6481                 _ => panic!("Unexpected event"),
6482         }
6483         nodes[1].node.claim_funds(payment_preimage_1);
6484         check_added_monitors!(nodes[1], 1);
6485         let update_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6486         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msgs.update_fulfill_htlcs[0]);
6487         commitment_signed_dance!(nodes[0], nodes[1], update_msgs.commitment_signed, false, true);
6488         let events = nodes[0].node.get_and_clear_pending_events();
6489         assert_eq!(events.len(), 1);
6490         match events[0] {
6491                 Event::PaymentSent { ref payment_preimage } => {
6492                         assert_eq!(*payment_preimage, payment_preimage_1);
6493                 }
6494                 _ => panic!("Unexpected event"),
6495         }
6496 }
6497
6498 // Test that if we fail to forward an HTLC that is being freed from the holding cell that the
6499 // HTLC is failed backwards. We trigger this failure to forward the freed HTLC by increasing
6500 // our fee while the HTLC is in the holding cell such that the HTLC is no longer affordable
6501 // once it's freed.
6502 #[test]
6503 fn test_fail_holding_cell_htlc_upon_free_multihop() {
6504         let chanmon_cfgs = create_chanmon_cfgs(3);
6505         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
6506         // When this test was written, the default base fee floated based on the HTLC count.
6507         // It is now fixed, so we simply set the fee to the expected value here.
6508         let mut config = test_default_channel_config();
6509         config.channel_options.forwarding_fee_base_msat = 196;
6510         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]);
6511         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
6512         let chan_0_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
6513         let chan_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
6514         let logger = test_utils::TestLogger::new();
6515
6516         // First nodes[1] generates an update_fee, setting the channel's
6517         // pending_update_fee.
6518         {
6519                 let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
6520                 *feerate_lock += 20;
6521         }
6522         nodes[1].node.timer_tick_occurred();
6523         check_added_monitors!(nodes[1], 1);
6524
6525         let events = nodes[1].node.get_and_clear_pending_msg_events();
6526         assert_eq!(events.len(), 1);
6527         let (update_msg, commitment_signed) = match events[0] {
6528                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
6529                         (update_fee.as_ref(), commitment_signed)
6530                 },
6531                 _ => panic!("Unexpected event"),
6532         };
6533
6534         nodes[2].node.handle_update_fee(&nodes[1].node.get_our_node_id(), update_msg.unwrap());
6535
6536         let mut chan_stat = get_channel_value_stat!(nodes[0], chan_0_1.2);
6537         let channel_reserve = chan_stat.channel_reserve_msat;
6538         let feerate = get_feerate!(nodes[0], chan_0_1.2);
6539
6540         // Send a payment which passes reserve checks but gets stuck in the holding cell.
6541         let feemsat = 239;
6542         let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat;
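        // With three nodes there is a single intermediate hop (nodes[1]), so the total routing fee is
        // one hop's worth of feemsat.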
6543         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]);
6544         let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1) - total_routing_fee_msat;
6545         let payment_event = {
6546                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6547                 let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], max_can_send, TEST_FINAL_CLTV, &logger).unwrap();
6548                 nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
6549                 check_added_monitors!(nodes[0], 1);
6550
6551                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6552                 assert_eq!(events.len(), 1);
6553
6554                 SendEvent::from_event(events.remove(0))
6555         };
6556         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6557         check_added_monitors!(nodes[1], 0);
6558         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6559         expect_pending_htlcs_forwardable!(nodes[1]);
6560
6561         chan_stat = get_channel_value_stat!(nodes[1], chan_1_2.2);
6562         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send);
6563
6564         // Flush the pending fee update.
6565         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
6566         let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
6567         check_added_monitors!(nodes[2], 1);
6568         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &raa);
6569         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &commitment_signed);
6570         check_added_monitors!(nodes[1], 2);
6571
6572         // A final RAA message is generated to finalize the fee update.
6573         let events = nodes[1].node.get_and_clear_pending_msg_events();
6574         assert_eq!(events.len(), 1);
6575
6576         let raa_msg = match &events[0] {
6577                 &MessageSendEvent::SendRevokeAndACK { ref msg, .. } => {
6578                         msg.clone()
6579                 },
6580                 _ => panic!("Unexpected event"),
6581         };
6582
6583         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa_msg);
6584         check_added_monitors!(nodes[2], 1);
6585         assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
6586
6587         // nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process.
6588         let process_htlc_forwards_event = nodes[1].node.get_and_clear_pending_events();
6589         assert_eq!(process_htlc_forwards_event.len(), 1);
6590         match &process_htlc_forwards_event[0] {
6591                 &Event::PendingHTLCsForwardable { .. } => {},
6592                 _ => panic!("Unexpected event"),
6593         }
6594
6595         // In response, we call ChannelManager's process_pending_htlc_forwards
6596         nodes[1].node.process_pending_htlc_forwards();
6597         check_added_monitors!(nodes[1], 1);
6598
6599         // This causes the HTLC to be failed backwards.
6600         let fail_event = nodes[1].node.get_and_clear_pending_msg_events();
6601         assert_eq!(fail_event.len(), 1);
6602         let (fail_msg, commitment_signed) = match &fail_event[0] {
6603                 &MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
6604                         assert_eq!(updates.update_add_htlcs.len(), 0);
6605                         assert_eq!(updates.update_fulfill_htlcs.len(), 0);
6606                         assert_eq!(updates.update_fail_malformed_htlcs.len(), 0);
6607                         assert_eq!(updates.update_fail_htlcs.len(), 1);
6608                         (updates.update_fail_htlcs[0].clone(), updates.commitment_signed.clone())
6609                 },
6610                 _ => panic!("Unexpected event"),
6611         };
6612
6613         // Pass the failure messages back to nodes[0].
6614         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_msg);
6615         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
6616
6617         // Complete the HTLC failure+removal process.
6618         let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6619         check_added_monitors!(nodes[0], 1);
6620         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
6621         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed);
6622         check_added_monitors!(nodes[1], 2);
6623         let final_raa_event = nodes[1].node.get_and_clear_pending_msg_events();
6624         assert_eq!(final_raa_event.len(), 1);
6625         let raa = match &final_raa_event[0] {
6626                 &MessageSendEvent::SendRevokeAndACK { ref msg, .. } => msg.clone(),
6627                 _ => panic!("Unexpected event"),
6628         };
6629         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa);
6630         expect_payment_failure_chan_update!(nodes[0], chan_1_2.0.contents.short_channel_id, false);
6631         expect_payment_failed!(nodes[0], our_payment_hash, false);
6632         check_added_monitors!(nodes[0], 1);
6633 }
6634
6635 // BOLT 2 Requirements for the Sender when constructing and sending an update_add_htlc message.
6636 // BOLT 2 Requirement: MUST NOT offer amount_msat it cannot pay for in the remote commitment transaction at the current feerate_per_kw (see "Updating Fees") while maintaining its channel reserve.
6637 //TODO: I don't believe this is explicitly enforced when sending an HTLC but as the Fee aspect of the BOLT specs is in flux leaving this as a TODO.
6638
6639 #[test]
6640 fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() {
6641         //BOLT2 Requirement: MUST NOT offer amount_msat below the receiving node's htlc_minimum_msat (same validation check catches both of these)
6642         let chanmon_cfgs = create_chanmon_cfgs(2);
6643         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6644         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6645         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6646         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
6647
6648         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
6649         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6650         let logger = test_utils::TestLogger::new();
6651         let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
6652         route.paths[0][0].fee_msat = 100;
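        // 100 msat is below the htlc_minimum_msat nodes[1] advertised for this channel, so the send
        // should be rejected locally before any update_add_htlc goes out (checked via the empty
        // msg-event queue below).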
6653
6654         unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err },
6655                 assert!(regex::Regex::new(r"Cannot send less than their minimum HTLC value \(\d+\)").unwrap().is_match(err)));
6656         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6657         nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send less than their minimum HTLC value".to_string(), 1);
6658 }
6659
6660 #[test]
6661 fn test_update_add_htlc_bolt2_sender_zero_value_msat() {
6662         //BOLT2 Requirement: MUST offer amount_msat greater than 0.
6663         let chanmon_cfgs = create_chanmon_cfgs(2);
6664         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6665         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6666         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6667         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
6668         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
6669
6670         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6671         let logger = test_utils::TestLogger::new();
6672         let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
6673         route.paths[0][0].fee_msat = 0;
6674         unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err },
6675                 assert_eq!(err, "Cannot send 0-msat HTLC"));
6676
6677         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6678         nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send 0-msat HTLC".to_string(), 1);
6679 }
6680
6681 #[test]
6682 fn test_update_add_htlc_bolt2_receiver_zero_value_msat() {
6683         //BOLT2 Requirement: MUST offer amount_msat greater than 0.
6684         let chanmon_cfgs = create_chanmon_cfgs(2);
6685         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6686         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6687         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6688         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
6689
6690         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
6691         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6692         let logger = test_utils::TestLogger::new();
6693         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
6694         nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
6695         check_added_monitors!(nodes[0], 1);
6696         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6697         updates.update_add_htlcs[0].amount_msat = 0;
6698
6699         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6700         nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote side tried to send a 0-msat HTLC".to_string(), 1);
6701         check_closed_broadcast!(nodes[1], true).unwrap();
6702         check_added_monitors!(nodes[1], 1);
6703 }
6704
6705 #[test]
6706 fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() {
6707         //BOLT 2 Requirement: MUST set cltv_expiry less than 500000000.
6708         //It is enforced when constructing a route.
6709         let chanmon_cfgs = create_chanmon_cfgs(2);
6710         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6711         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6712         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6713         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0, InitFeatures::known(), InitFeatures::known());
6714         let logger = test_utils::TestLogger::new();
6715
6716         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
6717
6718         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6719         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000000, 500000001, &logger).unwrap();
6720         unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::RouteError { ref err },
6721                 assert_eq!(err, &"Channel CLTV overflowed?"));
6722 }
6723
6724 #[test]
6725 fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() {
6726         //BOLT 2 Requirement: if result would be offering more than the remote's max_accepted_htlcs HTLCs, in the remote commitment transaction: MUST NOT add an HTLC.
6727         //BOLT 2 Requirement: for the first HTLC it offers MUST set id to 0.
6728         //BOLT 2 Requirement: MUST increase the value of id by 1 for each successive offer.
6729         let chanmon_cfgs = create_chanmon_cfgs(2);
6730         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6731         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6732         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6733         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0, InitFeatures::known(), InitFeatures::known());
6734         let max_accepted_htlcs = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().counterparty_max_accepted_htlcs as u64;
6735
6736         let logger = test_utils::TestLogger::new();
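        // Offer exactly counterparty_max_accepted_htlcs HTLCs, verifying that htlc_id starts at 0 and
        // increments by one per offer; the next attempted send after the loop should then fail locally.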
6737         for i in 0..max_accepted_htlcs {
6738                 let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
6739                 let payment_event = {
6740                         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6741                         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
6742                         nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
6743                         check_added_monitors!(nodes[0], 1);
6744
6745                         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6746                         assert_eq!(events.len(), 1);
6747                         if let MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate{ update_add_htlcs: ref htlcs, .. }, } = events[0] {
6748                                 assert_eq!(htlcs[0].htlc_id, i);
6749                         } else {
6750                                 assert!(false);
6751                         }
6752                         SendEvent::from_event(events.remove(0))
6753                 };
6754                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6755                 check_added_monitors!(nodes[1], 0);
6756                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6757
6758                 expect_pending_htlcs_forwardable!(nodes[1]);
6759                 expect_payment_received!(nodes[1], our_payment_hash, our_payment_secret, 100000);
6760         }
6761         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
6762         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6763         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
6764         unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err },
6765                 assert!(regex::Regex::new(r"Cannot push more than their max accepted HTLCs \(\d+\)").unwrap().is_match(err)));
6766
6767         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6768         nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot push more than their max accepted HTLCs".to_string(), 1);
6769 }
6770
6771 #[test]
6772 fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() {
6773         //BOLT 2 Requirement: if the sum of total offered HTLCs would exceed the remote's max_htlc_value_in_flight_msat: MUST NOT add an HTLC.
6774         let chanmon_cfgs = create_chanmon_cfgs(2);
6775         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6776         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6777         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6778         let channel_value = 100000;
6779         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 0, InitFeatures::known(), InitFeatures::known());
6780         let max_in_flight = get_channel_value_stat!(nodes[0], chan.2).counterparty_max_htlc_value_in_flight_msat;
6781
6782         send_payment(&nodes[0], &vec!(&nodes[1])[..], max_in_flight);
6783
6784         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
6785         // Manually create a route over our max in flight (which our router normally automatically
6786         // limits us to).
6787         let route = Route { paths: vec![vec![RouteHop {
6788            pubkey: nodes[1].node.get_our_node_id(), node_features: NodeFeatures::known(), channel_features: ChannelFeatures::known(),
6789            short_channel_id: nodes[1].node.list_usable_channels()[0].short_channel_id.unwrap(),
6790            fee_msat: max_in_flight + 1, cltv_expiry_delta: TEST_FINAL_CLTV
6791         }]] };
6792         unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err },
6793                 assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err)));
6794
6795         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6796         nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us over the max HTLC value in flight our peer will accept".to_string(), 1);
6797
6798         send_payment(&nodes[0], &[&nodes[1]], max_in_flight);
6799 }
6800
6801 // BOLT 2 Requirements for the Receiver when handling an update_add_htlc message.
6802 #[test]
6803 fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
6804         //BOLT2 Requirement: receiving an amount_msat equal to 0, OR less than its own htlc_minimum_msat -> SHOULD fail the channel.
6805         let chanmon_cfgs = create_chanmon_cfgs(2);
6806         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6807         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6808         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6809         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
6810         let htlc_minimum_msat: u64;
6811         {
6812                 let chan_lock = nodes[0].node.channel_state.lock().unwrap();
6813                 let channel = chan_lock.by_id.get(&chan.2).unwrap();
6814                 htlc_minimum_msat = channel.get_holder_htlc_minimum_msat();
6815         }
6816
6817         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
6818         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6819         let logger = test_utils::TestLogger::new();
6820         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], htlc_minimum_msat, TEST_FINAL_CLTV, &logger).unwrap();
6821         nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
6822         check_added_monitors!(nodes[0], 1);
6823         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6824         updates.update_add_htlcs[0].amount_msat = htlc_minimum_msat-1;
6825         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6826         assert!(nodes[1].node.list_channels().is_empty());
6827         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6828         assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
6829         check_added_monitors!(nodes[1], 1);
6830 }
6831
6832 #[test]
6833 fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() {
6834         //BOLT2 Requirement: receiving an amount_msat that the sending node cannot afford at the current feerate_per_kw (while maintaining its channel reserve): SHOULD fail the channel
6835         let chanmon_cfgs = create_chanmon_cfgs(2);
6836         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6837         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6838         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6839         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
6840         let logger = test_utils::TestLogger::new();
6841
6842         let chan_stat = get_channel_value_stat!(nodes[0], chan.2);
6843         let channel_reserve = chan_stat.channel_reserve_msat;
6844         let feerate = get_feerate!(nodes[0], chan.2);
6845         // The 2* and +1 are for the fee spike reserve.
6846         let commit_tx_fee_outbound = 2 * commit_tx_fee_msat(feerate, 1 + 1);
6847
6848         let max_can_send = 5000000 - channel_reserve - commit_tx_fee_outbound;
6849         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
6850         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6851         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], max_can_send, TEST_FINAL_CLTV, &logger).unwrap();
6852         nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
6853         check_added_monitors!(nodes[0], 1);
6854         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6855
6856         // Even though channel-initiator senders are required to respect the fee_spike_reserve,
6857         // at this time channel-initiatee receivers are not required to enforce that senders
6858         // respect the fee_spike_reserve.
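        // Overshoot max_can_send by the full (spike-padded) commit tx fee plus one msat so that, even
        // without the fee spike reserve being enforced by the receiver, the sender provably cannot
        // afford the HTLC.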
6859         updates.update_add_htlcs[0].amount_msat = max_can_send + commit_tx_fee_outbound + 1;
6860         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6861
6862         assert!(nodes[1].node.list_channels().is_empty());
6863         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6864         assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
6865         check_added_monitors!(nodes[1], 1);
6866 }
6867
6868 #[test]
6869 fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
6870         //BOLT 2 Requirement: if a sending node adds more than its max_accepted_htlcs HTLCs to its local commitment transaction: SHOULD fail the channel
6871         //BOLT 2 Requirement: MUST allow multiple HTLCs with the same payment_hash.
6872         let chanmon_cfgs = create_chanmon_cfgs(2);
6873         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6874         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6875         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6876         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
6877         let logger = test_utils::TestLogger::new();
6878
6879         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
6880         let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
6881
6882         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6883         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 3999999, TEST_FINAL_CLTV, &logger).unwrap();
6884
6885         let cur_height = nodes[0].node.best_block.read().unwrap().height() + 1;
6886         let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::signing_only(), &route.paths[0], &session_priv).unwrap();
6887         let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 3999999, &Some(our_payment_secret), cur_height, &None).unwrap();
6888         let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash);
6889
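        // Build a raw update_add_htlc by hand (constructing the onion ourselves) so we can replay it
        // with incrementing htlc_ids and push nodes[1] past OUR_MAX_HTLCS without being limited by
        // nodes[0]'s own send-side checks.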
6890         let mut msg = msgs::UpdateAddHTLC {
6891                 channel_id: chan.2,
6892                 htlc_id: 0,
6893                 amount_msat: 1000,
6894                 payment_hash: our_payment_hash,
6895                 cltv_expiry: htlc_cltv,
6896                 onion_routing_packet: onion_packet.clone(),
6897         };
6898
6899         for i in 0..super::channel::OUR_MAX_HTLCS {
6900                 msg.htlc_id = i as u64;
6901                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
6902         }
6903         msg.htlc_id = (super::channel::OUR_MAX_HTLCS) as u64;
6904         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
6905
6906         assert!(nodes[1].node.list_channels().is_empty());
6907         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6908         assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
6909         check_added_monitors!(nodes[1], 1);
6910 }
6911
6912 #[test]
6913 fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() {
6914         //BOLT 2 Requirement: if a sending node adds more than its max_htlc_value_in_flight_msat worth of offered HTLCs to its local commitment transaction: SHOULD fail the channel
6915         let chanmon_cfgs = create_chanmon_cfgs(2);
6916         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6917         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6918         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6919         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
6920         let logger = test_utils::TestLogger::new();
6921
6922         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
6923         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6924         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 1000000, TEST_FINAL_CLTV, &logger).unwrap();
6925         nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
6926         check_added_monitors!(nodes[0], 1);
6927         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6928         updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], chan.2).counterparty_max_htlc_value_in_flight_msat + 1;
6929         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6930
6931         assert!(nodes[1].node.list_channels().is_empty());
6932         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6933         assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str()));
6934         check_added_monitors!(nodes[1], 1);
6935 }
6936
6937 #[test]
6938 fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() {
6939         //BOLT2 Requirement: if sending node sets cltv_expiry to greater or equal to 500000000: SHOULD fail the channel.
6940         let chanmon_cfgs = create_chanmon_cfgs(2);
6941         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6942         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6943         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6944         let logger = test_utils::TestLogger::new();
6945
6946         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
6947         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
6948         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6949         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 1000000, TEST_FINAL_CLTV, &logger).unwrap();
6950         nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
6951         check_added_monitors!(nodes[0], 1);
6952         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6953         updates.update_add_htlcs[0].cltv_expiry = 500000000;
6954         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6955
6956         assert!(nodes[1].node.list_channels().is_empty());
6957         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6958         assert_eq!(err_msg.data,"Remote provided CLTV expiry in seconds instead of block height");
6959         check_added_monitors!(nodes[1], 1);
6960 }
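
// Illustrative sketch: the 500_000_000 threshold above mirrors Bitcoin's
// nLockTime convention, where values at or above that number are interpreted
// as UNIX timestamps rather than block heights, so such a cltv_expiry cannot
// be a sane block height. Hypothetical helper:
fn cltv_expiry_looks_like_seconds(cltv_expiry: u32) -> bool {
	cltv_expiry >= 500_000_000
}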
6961
6962 #[test]
6963 fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
6964         //BOLT 2 requirement: if the sender did not previously acknowledge the commitment of that HTLC: MUST ignore a repeated id value after a reconnection.
6965 	// We test this by first checking that repeated HTLCs pass commitment signature checks
6966         // after disconnect and that non-sequential htlc_ids result in a channel failure.
6967         let chanmon_cfgs = create_chanmon_cfgs(2);
6968         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6969         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6970         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6971         let logger = test_utils::TestLogger::new();
6972
6973         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
6974         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
6975         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6976         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 1000000, TEST_FINAL_CLTV, &logger).unwrap();
6977         nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
6978         check_added_monitors!(nodes[0], 1);
6979         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6980         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6981
6982         //Disconnect and Reconnect
6983         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
6984         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
6985         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
6986         let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
6987         assert_eq!(reestablish_1.len(), 1);
6988         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
6989         let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
6990         assert_eq!(reestablish_2.len(), 1);
6991         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
6992         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
6993         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
6994         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
6995
6996         //Resend HTLC
6997         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6998         assert_eq!(updates.commitment_signed.htlc_signatures.len(), 1);
6999         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed);
7000         check_added_monitors!(nodes[1], 1);
7001         let _bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
7002
7003         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
7004
7005         assert!(nodes[1].node.list_channels().is_empty());
7006         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
7007         assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str()));
7008         check_added_monitors!(nodes[1], 1);
7009 }
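
// Sketch of the id bookkeeping the test above exercises (hypothetical helper,
// not the library's code path): htlc_ids must arrive strictly in sequence. A
// re-send of an id that was never irrevocably committed is simply applied
// again after reconnection, while any other out-of-sequence id closes the
// channel.
fn check_next_htlc_id(expected_next: u64, received: u64) -> Result<(), String> {
	if received != expected_next {
		return Err(format!("Remote skipped HTLC ID (skipped ID: {})", expected_next));
	}
	Ok(())
}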
7010
7011 #[test]
7012 fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
7013         //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions:     MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
7014
7015         let chanmon_cfgs = create_chanmon_cfgs(2);
7016         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7017         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7018         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7019         let logger = test_utils::TestLogger::new();
7020         let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
7021         let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
7022         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
7023         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 1000000, TEST_FINAL_CLTV, &logger).unwrap();
7024         nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
7025
7026         check_added_monitors!(nodes[0], 1);
7027         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
7028         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
7029
7030         let update_msg = msgs::UpdateFulfillHTLC{
7031                 channel_id: chan.2,
7032                 htlc_id: 0,
7033                 payment_preimage: our_payment_preimage,
7034         };
7035
7036         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
7037
7038         assert!(nodes[0].node.list_channels().is_empty());
7039         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
7040         assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
7041         check_added_monitors!(nodes[0], 1);
7042 }
7043
7044 #[test]
7045 fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
7046         //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions:     MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
7047
7048         let chanmon_cfgs = create_chanmon_cfgs(2);
7049         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7050         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7051         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7052         let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
7053         let logger = test_utils::TestLogger::new();
7054
7055         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
7056         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
7057         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 1000000, TEST_FINAL_CLTV, &logger).unwrap();
7058         nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
7059         check_added_monitors!(nodes[0], 1);
7060         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
7061         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
7062
7063         let update_msg = msgs::UpdateFailHTLC{
7064                 channel_id: chan.2,
7065                 htlc_id: 0,
7066                 reason: msgs::OnionErrorPacket { data: Vec::new()},
7067         };
7068
7069         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
7070
7071         assert!(nodes[0].node.list_channels().is_empty());
7072         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
7073         assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
7074         check_added_monitors!(nodes[0], 1);
7075 }
7076
7077 #[test]
7078 fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment() {
7079         //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions:     MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
7080
7081         let chanmon_cfgs = create_chanmon_cfgs(2);
7082         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7083         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7084         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7085         let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
7086         let logger = test_utils::TestLogger::new();
7087
7088         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
7089         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
7090         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 1000000, TEST_FINAL_CLTV, &logger).unwrap();
7091         nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
7092         check_added_monitors!(nodes[0], 1);
7093         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
7094         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
7095         let update_msg = msgs::UpdateFailMalformedHTLC{
7096                 channel_id: chan.2,
7097                 htlc_id: 0,
7098                 sha256_of_onion: [1; 32],
7099                 failure_code: 0x8000,
7100         };
7101
7102         nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
7103
7104         assert!(nodes[0].node.list_channels().is_empty());
7105         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
7106         assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
7107         check_added_monitors!(nodes[0], 1);
7108 }
7109
7110 #[test]
7111 fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
7112         //BOLT 2 Requirement: A receiving node: if the id does not correspond to an HTLC in its current commitment transaction MUST fail the channel.
7113
7114         let chanmon_cfgs = create_chanmon_cfgs(2);
7115         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7116         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7117         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7118         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
7119
7120         let our_payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 100000).0;
7121
7122         nodes[1].node.claim_funds(our_payment_preimage);
7123         check_added_monitors!(nodes[1], 1);
7124
7125         let events = nodes[1].node.get_and_clear_pending_msg_events();
7126         assert_eq!(events.len(), 1);
7127         let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = {
7128                 match events[0] {
7129                         MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
7130                                 assert!(update_add_htlcs.is_empty());
7131                                 assert_eq!(update_fulfill_htlcs.len(), 1);
7132                                 assert!(update_fail_htlcs.is_empty());
7133                                 assert!(update_fail_malformed_htlcs.is_empty());
7134                                 assert!(update_fee.is_none());
7135                                 update_fulfill_htlcs[0].clone()
7136                         },
7137                         _ => panic!("Unexpected event"),
7138                 }
7139         };
7140
7141         update_fulfill_msg.htlc_id = 1;
7142
7143         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);
7144
7145         assert!(nodes[0].node.list_channels().is_empty());
7146         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
7147         assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find");
7148         check_added_monitors!(nodes[0], 1);
7149 }
7150
7151 #[test]
7152 fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
7153         //BOLT 2 Requirement: A receiving node: if the payment_preimage value in update_fulfill_htlc doesn't SHA256 hash to the corresponding HTLC payment_hash MUST fail the channel.
7154
7155         let chanmon_cfgs = create_chanmon_cfgs(2);
7156         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7157         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7158         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7159         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
7160
7161         let our_payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 100000).0;
7162
7163         nodes[1].node.claim_funds(our_payment_preimage);
7164         check_added_monitors!(nodes[1], 1);
7165
7166         let events = nodes[1].node.get_and_clear_pending_msg_events();
7167         assert_eq!(events.len(), 1);
7168         let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = {
7169                 match events[0] {
7170                         MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
7171                                 assert!(update_add_htlcs.is_empty());
7172                                 assert_eq!(update_fulfill_htlcs.len(), 1);
7173                                 assert!(update_fail_htlcs.is_empty());
7174                                 assert!(update_fail_malformed_htlcs.is_empty());
7175                                 assert!(update_fee.is_none());
7176                                 update_fulfill_htlcs[0].clone()
7177                         },
7178                         _ => panic!("Unexpected event"),
7179                 }
7180         };
7181
7182         update_fulfill_msg.payment_preimage = PaymentPreimage([1; 32]);
7183
7184         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);
7185
7186         assert!(nodes[0].node.list_channels().is_empty());
7187         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
7188         assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str()));
7189         check_added_monitors!(nodes[0], 1);
7190 }
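
// Illustrative sketch of the check the test above violates (a sketch assuming
// the sha256 hash types used elsewhere in this file, not the code path the
// test drives): a fulfill is only valid if the preimage hashes to the HTLC's
// payment_hash.
fn preimage_matches(preimage: &PaymentPreimage, payment_hash: &PaymentHash) -> bool {
	PaymentHash(Sha256::hash(&preimage.0).into_inner()) == *payment_hash
}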
7191
7192 #[test]
7193 fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_message() {
7194         //BOLT 2 Requirement: A receiving node: if the BADONION bit in failure_code is not set for update_fail_malformed_htlc MUST fail the channel.
7195
7196         let chanmon_cfgs = create_chanmon_cfgs(2);
7197         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7198         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7199         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7200         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
7201         let logger = test_utils::TestLogger::new();
7202
7203         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
7204         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
7205         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 1000000, TEST_FINAL_CLTV, &logger).unwrap();
7206         nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
7207         check_added_monitors!(nodes[0], 1);
7208
7209         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
7210         updates.update_add_htlcs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message
7211
7212         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
7213         check_added_monitors!(nodes[1], 0);
7214         commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false, true);
7215
7216         let events = nodes[1].node.get_and_clear_pending_msg_events();
7217
7218         let mut update_msg: msgs::UpdateFailMalformedHTLC = {
7219                 match events[0] {
7220                         MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
7221                                 assert!(update_add_htlcs.is_empty());
7222                                 assert!(update_fulfill_htlcs.is_empty());
7223                                 assert!(update_fail_htlcs.is_empty());
7224                                 assert_eq!(update_fail_malformed_htlcs.len(), 1);
7225                                 assert!(update_fee.is_none());
7226                                 update_fail_malformed_htlcs[0].clone()
7227                         },
7228                         _ => panic!("Unexpected event"),
7229                 }
7230         };
7231         update_msg.failure_code &= !0x8000;
7232         nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
7233
7234         assert!(nodes[0].node.list_channels().is_empty());
7235         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
7236         assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set");
7237         check_added_monitors!(nodes[0], 1);
7238 }
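
// Illustrative sketch: the BADONION requirement checked above boils down to
// inspecting the top bit (0x8000) of the failure_code carried in
// update_fail_malformed_htlc. Hypothetical helper:
const BADONION_BIT: u16 = 0x8000;
fn badonion_is_set(failure_code: u16) -> bool {
	failure_code & BADONION_BIT != 0
}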
7239
7240 #[test]
7241 fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_update_fail_htlc() {
7242         //BOLT 2 Requirement: a receiving node which has an outgoing HTLC canceled by update_fail_malformed_htlc:
7243         //    * MUST return an error in the update_fail_htlc sent to the link which originally sent the HTLC, using the failure_code given and setting the data to sha256_of_onion.
7244
7245         let chanmon_cfgs = create_chanmon_cfgs(3);
7246         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
7247         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
7248         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
7249         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
7250         create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
7251         let logger = test_utils::TestLogger::new();
7252
7253         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]);
7254
7255         //First hop
7256         let mut payment_event = {
7257                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
7258                 let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100000, TEST_FINAL_CLTV, &logger).unwrap();
7259                 nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
7260                 check_added_monitors!(nodes[0], 1);
7261                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
7262                 assert_eq!(events.len(), 1);
7263                 SendEvent::from_event(events.remove(0))
7264         };
7265         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
7266         check_added_monitors!(nodes[1], 0);
7267         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
7268         expect_pending_htlcs_forwardable!(nodes[1]);
7269         let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
7270         assert_eq!(events_2.len(), 1);
7271         check_added_monitors!(nodes[1], 1);
7272         payment_event = SendEvent::from_event(events_2.remove(0));
7273         assert_eq!(payment_event.msgs.len(), 1);
7274
7275         //Second Hop
7276         payment_event.msgs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message
7277         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
7278         check_added_monitors!(nodes[2], 0);
7279         commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true);
7280
7281         let events_3 = nodes[2].node.get_and_clear_pending_msg_events();
7282         assert_eq!(events_3.len(), 1);
7283         let update_msg : (msgs::UpdateFailMalformedHTLC, msgs::CommitmentSigned) = {
7284                 match events_3[0] {
7285                         MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
7286                                 assert!(update_add_htlcs.is_empty());
7287                                 assert!(update_fulfill_htlcs.is_empty());
7288                                 assert!(update_fail_htlcs.is_empty());
7289                                 assert_eq!(update_fail_malformed_htlcs.len(), 1);
7290                                 assert!(update_fee.is_none());
7291                                 (update_fail_malformed_htlcs[0].clone(), commitment_signed.clone())
7292                         },
7293                         _ => panic!("Unexpected event"),
7294                 }
7295         };
7296
7297         nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), &update_msg.0);
7298
7299         check_added_monitors!(nodes[1], 0);
7300         commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true);
7301         expect_pending_htlcs_forwardable!(nodes[1]);
7302         let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
7303         assert_eq!(events_4.len(), 1);
7304
7305 	//Confirm that handling the update_fail_malformed_htlc message produces an update_fail_htlc message to be forwarded back along the route
7306         match events_4[0] {
7307                 MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
7308                         assert!(update_add_htlcs.is_empty());
7309                         assert!(update_fulfill_htlcs.is_empty());
7310                         assert_eq!(update_fail_htlcs.len(), 1);
7311                         assert!(update_fail_malformed_htlcs.is_empty());
7312                         assert!(update_fee.is_none());
7313                 },
7314                 _ => panic!("Unexpected event"),
7315         };
7316
7317         check_added_monitors!(nodes[1], 1);
7318 }
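
// Sketch of what the intermediate node (nodes[1]) does above once it receives
// an update_fail_malformed_htlc from downstream: per the BOLT 2 requirement
// quoted at the top of the test, it fails the HTLC back upstream using the
// failure_code it was given and sha256_of_onion as the data. Hypothetical
// shape of that translation, not the library's onion-error encoding:
fn malformed_to_failure_data(failure_code: u16, sha256_of_onion: &[u8; 32]) -> (u16, Vec<u8>) {
	(failure_code, sha256_of_onion.to_vec())
}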
7319
7320 fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
7321 	// Dust-HTLC failure updates must be delayed until the failure-trigger tx (in this case the local commitment tx) reaches ANTI_REORG_DELAY.
7322 	// We can have at most two valid local commitment txs, so both cases must be covered, and both txs must be checked to get them all, as
7323 	// an HTLC could have been removed from the latest local commitment tx but still be valid until we get the remote's RAA.
7324
7325         let mut chanmon_cfgs = create_chanmon_cfgs(2);
7326         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
7327         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7328         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7329         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7330 	let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
7331
7332         let bs_dust_limit = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis;
7333
7334         // We route 2 dust-HTLCs between A and B
7335         let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
7336         let (_, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
7337         route_payment(&nodes[0], &[&nodes[1]], 1000000);
7338
7339         // Cache one local commitment tx as previous
7340         let as_prev_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
7341
7342         // Fail one HTLC to prune it in the will-be-latest-local commitment tx
7343         assert!(nodes[1].node.fail_htlc_backwards(&payment_hash_2));
7344         check_added_monitors!(nodes[1], 0);
7345         expect_pending_htlcs_forwardable!(nodes[1]);
7346         check_added_monitors!(nodes[1], 1);
7347
7348         let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
7349         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &remove.update_fail_htlcs[0]);
7350         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &remove.commitment_signed);
7351         check_added_monitors!(nodes[0], 1);
7352
7353 	// Cache one local commitment tx as latest
7354         let as_last_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
7355
7356         let events = nodes[0].node.get_and_clear_pending_msg_events();
7357         match events[0] {
7358                 MessageSendEvent::SendRevokeAndACK { node_id, .. } => {
7359                         assert_eq!(node_id, nodes[1].node.get_our_node_id());
7360                 },
7361                 _ => panic!("Unexpected event"),
7362         }
7363         match events[1] {
7364                 MessageSendEvent::UpdateHTLCs { node_id, .. } => {
7365                         assert_eq!(node_id, nodes[1].node.get_our_node_id());
7366                 },
7367                 _ => panic!("Unexpected event"),
7368         }
7369
7370         assert_ne!(as_prev_commitment_tx, as_last_commitment_tx);
7371 	// Fail the 2 dust-HTLCs, moving their failure into the maturation buffer (htlc_updated_waiting_threshold_conf)
7372         if announce_latest {
7373                 mine_transaction(&nodes[0], &as_last_commitment_tx[0]);
7374         } else {
7375                 mine_transaction(&nodes[0], &as_prev_commitment_tx[0]);
7376         }
7377
7378         check_closed_broadcast!(nodes[0], true);
7379         check_added_monitors!(nodes[0], 1);
7380
7381         assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7382         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7383         let events = nodes[0].node.get_and_clear_pending_events();
7384 	// Only 2 PaymentFailed events should show up; the over-dust HTLC has to be failed by the timeout tx
7385         assert_eq!(events.len(), 2);
7386         let mut first_failed = false;
7387         for event in events {
7388                 match event {
7389                         Event::PaymentFailed { payment_hash, .. } => {
7390                                 if payment_hash == payment_hash_1 {
7391                                         assert!(!first_failed);
7392                                         first_failed = true;
7393                                 } else {
7394                                         assert_eq!(payment_hash, payment_hash_2);
7395                                 }
7396                         }
7397                         _ => panic!("Unexpected event"),
7398                 }
7399         }
7400 }
7401
7402 #[test]
7403 fn test_failure_delay_dust_htlc_local_commitment() {
7404         do_test_failure_delay_dust_htlc_local_commitment(true);
7405         do_test_failure_delay_dust_htlc_local_commitment(false);
7406 }
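
// Rough sketch of what makes an HTLC "dust" in the tests around here
// (simplified: the real threshold also adds the weight-based fee of the
// would-be HTLC transaction to the dust limit): a dust HTLC gets no output on
// the commitment transaction, so it can only be failed or claimed with the
// commitment tx itself, never via an HTLC-timeout/HTLC-success tx.
fn is_dust_htlc(htlc_msat: u64, dust_limit_satoshis: u64) -> bool {
	htlc_msat / 1000 < dust_limit_satoshis
}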
7407
7408 fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
7409         // Outbound HTLC-failure updates must be cancelled if we get a reorg before we reach ANTI_REORG_DELAY.
7410         // Broadcast of revoked remote commitment tx, trigger failure-update of dust/non-dust HTLCs
7411         // Broadcast of remote commitment tx, trigger failure-update of dust-HTLCs
7412 	// Broadcast of timeout tx on remote commitment tx, trigger failure-update of non-dust HTLCs
7413         // Broadcast of local commitment tx, trigger failure-update of dust-HTLCs
7414         // Broadcast of HTLC-timeout tx on local commitment tx, trigger failure-update of non-dust HTLCs
7415
7416         let chanmon_cfgs = create_chanmon_cfgs(3);
7417         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
7418         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
7419         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
7420         let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
7421
7422         let bs_dust_limit = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis;
7423
7424         let (_payment_preimage_1, dust_hash, _payment_secret_1) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
7425         let (_payment_preimage_2, non_dust_hash, _payment_secret_2) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
7426
7427         let as_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
7428         let bs_commitment_tx = get_local_commitment_txn!(nodes[1], chan.2);
7429
7430         // We revoked bs_commitment_tx
7431         if revoked {
7432                 let (payment_preimage_3, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
7433                 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
7434         }
7435
7436         let mut timeout_tx = Vec::new();
7437         if local {
7438                 // We fail dust-HTLC 1 by broadcast of local commitment tx
7439                 mine_transaction(&nodes[0], &as_commitment_tx[0]);
7440                 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7441                 expect_payment_failed!(nodes[0], dust_hash, true);
7442
7443                 connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS - ANTI_REORG_DELAY);
7444                 check_closed_broadcast!(nodes[0], true);
7445                 check_added_monitors!(nodes[0], 1);
7446                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7447                 timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[1].clone());
7448                 assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
7449                 // We fail non-dust-HTLC 2 by broadcast of local HTLC-timeout tx on local commitment tx
7450                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7451                 mine_transaction(&nodes[0], &timeout_tx[0]);
7452                 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7453                 expect_payment_failed!(nodes[0], non_dust_hash, true);
7454         } else {
7455 		// We fail dust-HTLC 1 by broadcast of the remote commitment tx. If revoked, also fail the non-dust HTLC
7456                 mine_transaction(&nodes[0], &bs_commitment_tx[0]);
7457                 check_closed_broadcast!(nodes[0], true);
7458                 check_added_monitors!(nodes[0], 1);
7459                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7460                 connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
7461                 timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[1].clone());
7462                 if !revoked {
7463                         expect_payment_failed!(nodes[0], dust_hash, true);
7464                         assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
7465                         // We fail non-dust-HTLC 2 by broadcast of local timeout tx on remote commitment tx
7466                         mine_transaction(&nodes[0], &timeout_tx[0]);
7467                         assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7468                         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7469                         expect_payment_failed!(nodes[0], non_dust_hash, true);
7470                 } else {
7471                         // If revoked, both dust & non-dust HTLCs should have been failed after ANTI_REORG_DELAY confs of revoked
7472                         // commitment tx
7473                         let events = nodes[0].node.get_and_clear_pending_events();
7474                         assert_eq!(events.len(), 2);
7475                         let first;
7476                         match events[0] {
7477                                 Event::PaymentFailed { payment_hash, .. } => {
7478                                         if payment_hash == dust_hash { first = true; }
7479                                         else { first = false; }
7480                                 },
7481                                 _ => panic!("Unexpected event"),
7482                         }
7483                         match events[1] {
7484                                 Event::PaymentFailed { payment_hash, .. } => {
7485                                         if first { assert_eq!(payment_hash, non_dust_hash); }
7486                                         else { assert_eq!(payment_hash, dust_hash); }
7487                                 },
7488                                 _ => panic!("Unexpected event"),
7489                         }
7490                 }
7491         }
7492 }
7493
7494 #[test]
7495 fn test_sweep_outbound_htlc_failure_update() {
7496         do_test_sweep_outbound_htlc_failure_update(false, true);
7497         do_test_sweep_outbound_htlc_failure_update(false, false);
7498         do_test_sweep_outbound_htlc_failure_update(true, false);
7499 }
7500
7501 #[test]
7502 fn test_upfront_shutdown_script() {
7503 	// BOLT 2 : Option upfront shutdown script: if a peer commits to its closing_script at channel opening,
7504 	// enforce it when handling the shutdown message
7505
7506         let mut config = UserConfig::default();
7507         config.channel_options.announced_channel = true;
7508         config.peer_channel_config_limits.force_announced_channel_preference = false;
7509         config.channel_options.commit_upfront_shutdown_pubkey = false;
7510         let user_cfgs = [None, Some(config), None];
7511         let chanmon_cfgs = create_chanmon_cfgs(3);
7512         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
7513         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs);
7514         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
7515
7516 	// We test that in the case of a peer committing upfront to a script, if it changes at closing, we refuse to sign
7517         let flags = InitFeatures::known();
7518         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000, flags.clone(), flags.clone());
7519         nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
7520         let mut node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id());
7521         node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
7522 	// Test that we enforce the upfront scriptpubkey: by providing a different one at closing, we disconnect the peer
7523         nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
7524 	assert!(regex::Regex::new(r"Got shutdown request with a scriptpubkey \([A-Fa-f0-9]+\) which did not match their previous scriptpubkey.").unwrap().is_match(check_closed_broadcast!(nodes[2], true).unwrap().data.as_str()));
7525         check_added_monitors!(nodes[2], 1);
7526
7527 	// We test that in the case of a peer committing upfront to a script, if it doesn't change at closing, we sign
7528         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000, flags.clone(), flags.clone());
7529         nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
7530         let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id());
7532         nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
7533         let events = nodes[2].node.get_and_clear_pending_msg_events();
7534         assert_eq!(events.len(), 1);
7535         match events[0] {
7536                 MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[0].node.get_our_node_id()) }
7537                 _ => panic!("Unexpected event"),
7538         }
7539
7540 	// We test that in the case of a non-signaling peer we don't enforce the committed script at channel opening
7541         let flags_no = InitFeatures::known().clear_upfront_shutdown_script();
7542         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, flags_no, flags.clone());
7543         nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
7544         let node_1_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
7545         nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
7546         check_added_monitors!(nodes[1], 1);
7547         let events = nodes[1].node.get_and_clear_pending_msg_events();
7548         assert_eq!(events.len(), 1);
7549         match events[0] {
7550                 MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[0].node.get_our_node_id()) }
7551                 _ => panic!("Unexpected event"),
7552         }
7553
7554 	// We test that if the user opts out, we provide a zero-length script at channel opening and we are able to close
7555 	// the channel smoothly; the opt-out is from the channel initiator here
7556         let chan = create_announced_chan_between_nodes_with_value(&nodes, 1, 0, 1000000, 1000000, flags.clone(), flags.clone());
7557         nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
7558         check_added_monitors!(nodes[1], 1);
7559         let node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
7560         nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
7561         let events = nodes[0].node.get_and_clear_pending_msg_events();
7562         assert_eq!(events.len(), 1);
7563         match events[0] {
7564                 MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
7565                 _ => panic!("Unexpected event"),
7566         }
7567
7568 	// We test that if the user opts out, we provide a zero-length script at channel opening and we are able to close
7569 	// the channel smoothly
7570         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, flags.clone(), flags.clone());
7571         nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
7572         check_added_monitors!(nodes[1], 1);
7573         let node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
7574         nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
7575         let events = nodes[0].node.get_and_clear_pending_msg_events();
7576         assert_eq!(events.len(), 2);
7577         match events[0] {
7578                 MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
7579                 _ => panic!("Unexpected event"),
7580         }
7581         match events[1] {
7582                 MessageSendEvent::SendClosingSigned { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
7583                 _ => panic!("Unexpected event"),
7584         }
7585 }
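
// Sketch of the upfront-shutdown enforcement exercised above (hypothetical
// helper over bitcoin Script values, not the channel's own check): if a peer
// committed to a shutdown scriptpubkey at channel open, the scriptpubkey in
// its later shutdown message must match it exactly; with no upfront
// commitment (or a zero-length one), any acceptable script may be used.
fn upfront_script_ok(committed: Option<&bitcoin::Script>, proposed: &bitcoin::Script) -> bool {
	match committed {
		Some(script) if !script.is_empty() => script == proposed,
		_ => true,
	}
}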
7586
7587 #[test]
7588 fn test_unsupported_anysegwit_upfront_shutdown_script() {
7589         let chanmon_cfgs = create_chanmon_cfgs(2);
7590         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7591         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7592         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7593
7594 	// Use a non-v0 segwit script, which is only acceptable if the peer signals option_shutdown_anysegwit (cleared from node_features here)
7595         let node_features = InitFeatures::known().clear_shutdown_anysegwit();
7596         let anysegwit_shutdown_script = Builder::new()
7597                 .push_int(16)
7598                 .push_slice(&[0, 40])
7599                 .into_script();
7600
7601         // Check script when handling an open_channel message
7602         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
7603         let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
7604         open_channel.shutdown_scriptpubkey = Present(anysegwit_shutdown_script.clone());
7605         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), node_features.clone(), &open_channel);
7606
7607         let events = nodes[1].node.get_and_clear_pending_msg_events();
7608         assert_eq!(events.len(), 1);
7609         match events[0] {
7610                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
7611                         assert_eq!(node_id, nodes[0].node.get_our_node_id());
7612                         assert_eq!(msg.data, "Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: Script(OP_PUSHNUM_16 OP_PUSHBYTES_2 0028)");
7613                 },
7614                 _ => panic!("Unexpected event"),
7615         }
7616
7617         // Check script when handling an accept_channel message
7618         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
7619         let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
7620         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel);
7621         let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
7622         accept_channel.shutdown_scriptpubkey = Present(anysegwit_shutdown_script.clone());
7623         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), node_features, &accept_channel);
7624
7625         let events = nodes[0].node.get_and_clear_pending_msg_events();
7626         assert_eq!(events.len(), 1);
7627         match events[0] {
7628                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
7629                         assert_eq!(node_id, nodes[1].node.get_our_node_id());
7630                         assert_eq!(msg.data, "Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: Script(OP_PUSHNUM_16 OP_PUSHBYTES_2 0028)");
7631                 },
7632                 _ => panic!("Unexpected event"),
7633         }
7634 }
7635
7636 #[test]
7637 fn test_invalid_upfront_shutdown_script() {
7638         let chanmon_cfgs = create_chanmon_cfgs(2);
7639         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7640         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7641         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7642
7643         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
7644
7645         // Use a segwit v0 script with an unsupported witness program
7646         let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
7647         open_channel.shutdown_scriptpubkey = Present(Builder::new().push_int(0)
7648                 .push_slice(&[0, 0])
7649                 .into_script());
7650         nodes[0].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel);
7651
7652         let events = nodes[0].node.get_and_clear_pending_msg_events();
7653         assert_eq!(events.len(), 1);
7654         match events[0] {
7655                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
7656                         assert_eq!(node_id, nodes[0].node.get_our_node_id());
7657                         assert_eq!(msg.data, "Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: Script(OP_0 OP_PUSHBYTES_2 0000)");
7658                 },
7659                 _ => panic!("Unexpected event"),
7660         }
7661 }
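
// Illustrative sketch of the shutdown scriptpubkey whitelist the tests above
// and below poke at (hypothetical helper, not the library's validation code):
// witness v0 programs must be 20 bytes (P2WPKH) or 32 bytes (P2WSH), while
// higher witness versions (v1 through v16, programs of 2 to 40 bytes) are only
// acceptable when the peer signaled option_shutdown_anysegwit.
fn shutdown_witness_program_ok(version: u8, program_len: usize, peer_anysegwit: bool) -> bool {
	match version {
		0 => program_len == 20 || program_len == 32,
		1..=16 => peer_anysegwit && (2..=40).contains(&program_len),
		_ => false,
	}
}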
7662
7663 #[test]
7664 fn test_segwit_v0_shutdown_script() {
7665         let mut config = UserConfig::default();
7666         config.channel_options.announced_channel = true;
7667         config.peer_channel_config_limits.force_announced_channel_preference = false;
7668         config.channel_options.commit_upfront_shutdown_pubkey = false;
7669         let user_cfgs = [None, Some(config), None];
7670         let chanmon_cfgs = create_chanmon_cfgs(3);
7671         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
7672         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs);
7673         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
7674
7675         let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
7676         nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
7677         check_added_monitors!(nodes[1], 1);
7678
7679         // Use a segwit v0 script supported even without option_shutdown_anysegwit
7680         let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
7681         node_0_shutdown.scriptpubkey = Builder::new().push_int(0)
7682                 .push_slice(&[0; 20])
7683                 .into_script();
7684         nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
7685
7686         let events = nodes[0].node.get_and_clear_pending_msg_events();
7687         assert_eq!(events.len(), 2);
7688         match events[0] {
7689                 MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
7690                 _ => panic!("Unexpected event"),
7691         }
7692         match events[1] {
7693                 MessageSendEvent::SendClosingSigned { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
7694                 _ => panic!("Unexpected event"),
7695         }
7696 }
7697
7698 #[test]
7699 fn test_anysegwit_shutdown_script() {
7700         let mut config = UserConfig::default();
7701         config.channel_options.announced_channel = true;
7702         config.peer_channel_config_limits.force_announced_channel_preference = false;
7703         config.channel_options.commit_upfront_shutdown_pubkey = false;
7704         let user_cfgs = [None, Some(config), None];
7705         let chanmon_cfgs = create_chanmon_cfgs(3);
7706         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
7707         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs);
7708         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
7709
7710         let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
7711         nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
7712         check_added_monitors!(nodes[1], 1);
7713
7714         // Use a non-v0 segwit script supported by option_shutdown_anysegwit
7715         let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
7716         node_0_shutdown.scriptpubkey = Builder::new().push_int(16)
7717                 .push_slice(&[0, 0])
7718                 .into_script();
7719         nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
7720
7721         let events = nodes[0].node.get_and_clear_pending_msg_events();
7722         assert_eq!(events.len(), 2);
7723         match events[0] {
7724                 MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
7725                 _ => panic!("Unexpected event"),
7726         }
7727         match events[1] {
7728                 MessageSendEvent::SendClosingSigned { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
7729                 _ => panic!("Unexpected event"),
7730         }
7731 }
7732
7733 #[test]
7734 fn test_unsupported_anysegwit_shutdown_script() {
7735         let mut config = UserConfig::default();
7736         config.channel_options.announced_channel = true;
7737         config.peer_channel_config_limits.force_announced_channel_preference = false;
7738         config.channel_options.commit_upfront_shutdown_pubkey = false;
7739         let user_cfgs = [None, Some(config), None];
7740         let chanmon_cfgs = create_chanmon_cfgs(3);
7741         let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
7742         node_cfgs[0].features = InitFeatures::known().clear_shutdown_anysegwit();
7743         node_cfgs[1].features = InitFeatures::known().clear_shutdown_anysegwit();
7744         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs);
7745         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
7746
7747         // Check that using an unsupported shutdown script fails and a supported one succeeds.
7748         let supported_shutdown_script = chanmon_cfgs[1].keys_manager.get_shutdown_scriptpubkey();
7749         let unsupported_shutdown_script =
7750                 ShutdownScript::new_witness_program(NonZeroU8::new(16).unwrap(), &[0, 40]).unwrap();
7751         chanmon_cfgs[1].keys_manager
7752                 .expect(OnGetShutdownScriptpubkey { returns: unsupported_shutdown_script.clone() })
7753                 .expect(OnGetShutdownScriptpubkey { returns: supported_shutdown_script });
7754
7755         let chan = create_announced_chan_between_nodes(&nodes, 0, 1, node_cfgs[0].features.clone(), node_cfgs[1].features.clone());
7756         match nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()) {
7757                 Err(APIError::IncompatibleShutdownScript { script }) => {
7758                         assert_eq!(script.into_inner(), unsupported_shutdown_script.clone().into_inner());
7759                 },
7760                 Err(e) => panic!("Unexpected error: {:?}", e),
7761                 Ok(_) => panic!("Expected error"),
7762         }
7763         nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
7764         check_added_monitors!(nodes[1], 1);
7765
7766         // Use a non-v0 segwit script unsupported without option_shutdown_anysegwit
7767         let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
7768         node_0_shutdown.scriptpubkey = unsupported_shutdown_script.into_inner();
7769         nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_cfgs[1].features, &node_0_shutdown);
7770
7771         let events = nodes[0].node.get_and_clear_pending_msg_events();
7772         assert_eq!(events.len(), 2);
7773         match events[1] {
7774                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
7775                         assert_eq!(node_id, nodes[1].node.get_our_node_id());
7776                         assert_eq!(msg.data, "Got a nonstandard scriptpubkey (60020028) from remote peer".to_owned());
7777                 },
7778                 _ => panic!("Unexpected event"),
7779         }
7780         check_added_monitors!(nodes[0], 1);
7781 }
7782
7783 #[test]
7784 fn test_invalid_shutdown_script() {
7785         let mut config = UserConfig::default();
7786         config.channel_options.announced_channel = true;
7787         config.peer_channel_config_limits.force_announced_channel_preference = false;
7788         config.channel_options.commit_upfront_shutdown_pubkey = false;
7789         let user_cfgs = [None, Some(config), None];
7790         let chanmon_cfgs = create_chanmon_cfgs(3);
7791         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
7792         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs);
7793         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
7794
7795         let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
7796         nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
7797         check_added_monitors!(nodes[1], 1);
7798
7799         // Use a segwit v0 script with an unsupported witness program
7800         let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
7801         node_0_shutdown.scriptpubkey = Builder::new().push_int(0)
7802                 .push_slice(&[0, 0])
7803                 .into_script();
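        // This serializes as 00020000: OP_0, a 2-byte push, and 0x0000. A v0 witness program must be
        // 20 or 32 bytes long, so nodes[0] rejects it as nonstandard (see the error asserted below).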
7804         nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
7805
7806         let events = nodes[0].node.get_and_clear_pending_msg_events();
7807         assert_eq!(events.len(), 2);
7808         match events[1] {
7809                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
7810                         assert_eq!(node_id, nodes[1].node.get_our_node_id());
7811                         assert_eq!(msg.data, "Got a nonstandard scriptpubkey (00020000) from remote peer".to_owned())
7812                 },
7813                 _ => panic!("Unexpected event"),
7814         }
7815         check_added_monitors!(nodes[0], 1);
7816 }
7817
7818 #[test]
7819 fn test_user_configurable_csv_delay() {
7820         // We test that our channel constructors yield errors when we pass them an absurd csv delay
7821
7822         let mut low_our_to_self_config = UserConfig::default();
7823         low_our_to_self_config.own_channel_config.our_to_self_delay = 6;
7824         let mut high_their_to_self_config = UserConfig::default();
7825         high_their_to_self_config.peer_channel_config_limits.their_to_self_delay = 100;
7826         let user_cfgs = [Some(high_their_to_self_config.clone()), None];
7827         let chanmon_cfgs = create_chanmon_cfgs(2);
7828         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7829         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
7830         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7831
7832         // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in Channel::new_outbound()
7833         if let Err(error) = Channel::new_outbound(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), 1000000, 1000000, 0, &low_our_to_self_config) {
7834                 match error {
7835                         APIError::APIMisuseError { err } => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); },
7836                         _ => panic!("Unexpected event"),
7837                 }
7838         } else { assert!(false) }
7839
7840         // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in Channel::new_from_req()
7841         nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
7842         let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
7843         open_channel.to_self_delay = 200;
7844         if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), &open_channel, 0, &low_our_to_self_config) {
7845                 match error {
7846                         ChannelError::Close(err) => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str()));  },
7847                         _ => panic!("Unexpected event"),
7848                 }
7849         } else { assert!(false); }
7850
7851         // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::accept_channel()
7852         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
7853         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
7854         let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
7855         accept_channel.to_self_delay = 200;
7856         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
7857         if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[0] {
7858                 match action {
7859                         &ErrorAction::SendErrorMessage { ref msg } => {
7860                                 assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(msg.data.as_str()));
7861                         },
7862                         _ => { assert!(false); }
7863                 }
7864         } else { assert!(false); }
7865
7866         // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::new_from_req()
7867         nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
7868         let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
7869         open_channel.to_self_delay = 200;
7870         if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), &open_channel, 0, &high_their_to_self_config) {
7871                 match error {
7872                         ChannelError::Close(err) => { assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(err.as_str())); },
7873                         _ => panic!("Unexpected event"),
7874                 }
7875         } else { assert!(false); }
7876 }
7877
7878 #[test]
7879 fn test_data_loss_protect() {
7880         // We want to be sure that:
7881         // * we don't broadcast our local commitment tx if we've fallen behind
7882         //   (but this is not quite true - we broadcast during Drop because chanmon is out of sync with chanmgr)
7883         // * we close the channel upon detecting that the other side has fallen behind
7884         // * we are able to claim our own outputs thanks to to_remote being static
7885         // TODO: this test is incomplete and the data_loss_protect implementation is incomplete - see issue #775
7886         let persister;
7887         let logger;
7888         let fee_estimator;
7889         let tx_broadcaster;
7890         let chain_source;
7891         let mut chanmon_cfgs = create_chanmon_cfgs(2);
7892         // We broadcast during Drop because chanmon is out of sync with chanmgr, which would cause a panic
7893         // during signing due to revoked tx
7894         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
7895         let keys_manager = &chanmon_cfgs[0].keys_manager;
7896         let monitor;
7897         let node_state_0;
7898         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7899         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7900         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7901
7902         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
7903
7904         // Cache node A state before any channel update
7905         let previous_node_state = nodes[0].node.encode();
7906         let mut previous_chain_monitor_state = test_utils::TestVecWriter(Vec::new());
7907         nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut previous_chain_monitor_state).unwrap();
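        // Both the ChannelManager and the ChannelMonitor state are cached here, before the two payments
        // below, so node A can later be restarted from this stale state to simulate having lost data.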
7908
7909         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
7910         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
7911
7912         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
7913         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
7914
7915         // Restore node A from previous state
7916         logger = test_utils::TestLogger::with_id(format!("node {}", 0));
7917         let mut chain_monitor = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut io::Cursor::new(previous_chain_monitor_state.0), keys_manager).unwrap().1;
7918         chain_source = test_utils::TestChainSource::new(Network::Testnet);
7919         tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))};
7920         fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
7921         persister = test_utils::TestPersister::new();
7922         monitor = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &fee_estimator, &persister, keys_manager);
7923         node_state_0 = {
7924                 let mut channel_monitors = HashMap::new();
7925                 channel_monitors.insert(OutPoint { txid: chan.3.txid(), index: 0 }, &mut chain_monitor);
7926                 <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut io::Cursor::new(previous_node_state), ChannelManagerReadArgs {
7927                         keys_manager: keys_manager,
7928                         fee_estimator: &fee_estimator,
7929                         chain_monitor: &monitor,
7930                         logger: &logger,
7931                         tx_broadcaster: &tx_broadcaster,
7932                         default_config: UserConfig::default(),
7933                         channel_monitors,
7934                 }).unwrap().1
7935         };
7936         nodes[0].node = &node_state_0;
7937         assert!(monitor.watch_channel(OutPoint { txid: chan.3.txid(), index: 0 }, chain_monitor).is_ok());
7938         nodes[0].chain_monitor = &monitor;
7939         nodes[0].chain_source = &chain_source;
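        // Node A is now running from the stale, pre-payment state; the channel_reestablish exchange below
        // should reveal (via the data_loss_protect fields) that A has fallen behind.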
7940
7941         check_added_monitors!(nodes[0], 1);
7942
7943         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
7944         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
7945
7946         let reestablish_0 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
7947
7948         // Check we don't broadcast any transactions following learning of per_commitment_point from B
7949         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_0[0]);
7950         check_added_monitors!(nodes[0], 1);
7951
7952         {
7953                 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
7954                 assert_eq!(node_txn.len(), 0);
7955         }
7956
7957         let mut reestablish_1 = Vec::with_capacity(1);
7958         for msg in nodes[0].node.get_and_clear_pending_msg_events() {
7959                 if let MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } = msg {
7960                         assert_eq!(*node_id, nodes[1].node.get_our_node_id());
7961                         reestablish_1.push(msg.clone());
7962                 } else if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg {
7963                 } else if let MessageSendEvent::HandleError { ref action, .. } = msg {
7964                         match action {
7965                                 &ErrorAction::SendErrorMessage { ref msg } => {
7966                                         assert_eq!(msg.data, "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting");
7967                                 },
7968                                 _ => panic!("Unexpected event!"),
7969                         }
7970                 } else {
7971                         panic!("Unexpected event")
7972                 }
7973         }
7974
7975         // Check B closes the channel upon detecting that A has fallen behind
7976         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
7977         assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Peer attempted to reestablish channel with a very old local commitment transaction");
7978         check_added_monitors!(nodes[1], 1);
7979
7980
7981         // Check A is able to claim to_remote output
7982         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
7983         assert_eq!(node_txn.len(), 1);
7984         check_spends!(node_txn[0], chan.3);
7985         assert_eq!(node_txn[0].output.len(), 2);
7986         mine_transaction(&nodes[0], &node_txn[0]);
7987         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7988         let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
7989         assert_eq!(spend_txn.len(), 1);
7990         check_spends!(spend_txn[0], node_txn[0]);
7991 }
7992
7993 #[test]
7994 fn test_check_htlc_underpaying() {
7995         // Send a payment through A -> B, but A maliciously
7996         // sends a probe payment (i.e. less than the expected value)
7997         // to B; B should refuse the payment.
7998
7999         let chanmon_cfgs = create_chanmon_cfgs(2);
8000         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8001         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8002         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8003
8004         // Create some initial channels
8005         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
8006
8007         let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 10_000, TEST_FINAL_CLTV, nodes[0].logger).unwrap();
8008         let (_, our_payment_hash, _) = get_payment_preimage_hash!(nodes[0]);
8009         let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200, 0).unwrap();
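        // The route pays only 10_000 msat while the inbound payment was registered for 100_000 msat, so
        // nodes[1] should treat the HTLC as underpaying and fail it back.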
8010         nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
8011         check_added_monitors!(nodes[0], 1);
8012
8013         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8014         assert_eq!(events.len(), 1);
8015         let mut payment_event = SendEvent::from_event(events.pop().unwrap());
8016         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
8017         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
8018
8019         // Note that we first have to wait a random delay before processing the receipt of the HTLC,
8020         // and then will wait a second random delay before failing the HTLC back:
8021         expect_pending_htlcs_forwardable!(nodes[1]);
8022         expect_pending_htlcs_forwardable!(nodes[1]);
8023
8024         // nodes[1] is expecting a payment of 100_000 but received 10_000,
8025         // so it should fail the HTLC as if we didn't know the preimage.
8026         nodes[1].node.process_pending_htlc_forwards();
8027
8028         let events = nodes[1].node.get_and_clear_pending_msg_events();
8029         assert_eq!(events.len(), 1);
8030         let (update_fail_htlc, commitment_signed) = match events[0] {
8031                 MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
8032                         assert!(update_add_htlcs.is_empty());
8033                         assert!(update_fulfill_htlcs.is_empty());
8034                         assert_eq!(update_fail_htlcs.len(), 1);
8035                         assert!(update_fail_malformed_htlcs.is_empty());
8036                         assert!(update_fee.is_none());
8037                         (update_fail_htlcs[0].clone(), commitment_signed)
8038                 },
8039                 _ => panic!("Unexpected event"),
8040         };
8041         check_added_monitors!(nodes[1], 1);
8042
8043         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlc);
8044         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
8045
8046         // 10_000 msat as u64, followed by a height of CHAN_CONFIRM_DEPTH as u32
8047         let mut expected_failure_data = byte_utils::be64_to_array(10_000).to_vec();
8048         expected_failure_data.extend_from_slice(&byte_utils::be32_to_array(CHAN_CONFIRM_DEPTH));
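        // 0x4000|15 is PERM|15, incorrect_or_unknown_payment_details (BOLT 4); its failure data carries
        // the incoming HTLC amount and the current block height, matching expected_failure_data above.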
8049         expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000|15, &expected_failure_data[..]);
8050 }
8051
8052 #[test]
8053 fn test_announce_disable_channels() {
8054         // Create 3 channels between A and B. Disconnect B. Call timer_tick_occurred and check for the
8055         // disabling ChannelUpdates. Reconnect B, reestablish, and check that re-enabling ChannelUpdates are eventually generated.
8056
8057         let chanmon_cfgs = create_chanmon_cfgs(2);
8058         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8059         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8060         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8061
8062         let short_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
8063         let short_id_2 = create_announced_chan_between_nodes(&nodes, 1, 0, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
8064         let short_id_3 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
8065
8066         // Disconnect peers
8067         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
8068         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
8069
8070         nodes[0].node.timer_tick_occurred(); // Enabled -> DisabledStaged
8071         nodes[0].node.timer_tick_occurred(); // DisabledStaged -> Disabled
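        // The disable is staged across two timer ticks, so a disconnection that resolves within one tick
        // never results in disabled ChannelUpdates being broadcast.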
8072         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
8073         assert_eq!(msg_events.len(), 3);
8074         let mut chans_disabled: HashSet<u64> = [short_id_1, short_id_2, short_id_3].iter().map(|a| *a).collect();
8075         for e in msg_events {
8076                 match e {
8077                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
8078                                 assert_eq!(msg.contents.flags & (1<<1), 1<<1); // The "channel disabled" bit should be set
8079                                 // Check that each channel gets updated exactly once
8080                                 if !chans_disabled.remove(&msg.contents.short_channel_id) {
8081                                         panic!("Generated ChannelUpdate for wrong chan!");
8082                                 }
8083                         },
8084                         _ => panic!("Unexpected event"),
8085                 }
8086         }
8087         // Reconnect peers
8088         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
8089         let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
8090         assert_eq!(reestablish_1.len(), 3);
8091         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
8092         let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
8093         assert_eq!(reestablish_2.len(), 3);
8094
8095         // Reestablish chan_1
8096         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
8097         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
8098         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
8099         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
8100         // Reestablish chan_2
8101         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[1]);
8102         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
8103         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[1]);
8104         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
8105         // Reestablish chan_3
8106         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[2]);
8107         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
8108         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[2]);
8109         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
8110
8111         nodes[0].node.timer_tick_occurred();
8112         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
8113         nodes[0].node.timer_tick_occurred();
8114         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
8115         assert_eq!(msg_events.len(), 3);
8116         chans_disabled = [short_id_1, short_id_2, short_id_3].iter().map(|a| *a).collect();
8117         for e in msg_events {
8118                 match e {
8119                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
8120                                 assert_eq!(msg.contents.flags & (1<<1), 0); // The "channel disabled" bit should be off
8121                                 // Check that each channel gets updated exactly once
8122                                 if !chans_disabled.remove(&msg.contents.short_channel_id) {
8123                                         panic!("Generated ChannelUpdate for wrong chan!");
8124                                 }
8125                         },
8126                         _ => panic!("Unexpected event"),
8127                 }
8128         }
8129 }
8130
8131 #[test]
8132 fn test_priv_forwarding_rejection() {
8133         // If we have a private channel with outbound liquidity, and
8134         // UserConfig::accept_forwards_to_priv_channels is set to false, we should reject any attempts
8135         // to forward through that channel.
8136         let chanmon_cfgs = create_chanmon_cfgs(3);
8137         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
8138         let mut no_announce_cfg = test_default_channel_config();
8139         no_announce_cfg.channel_options.announced_channel = false;
8140         no_announce_cfg.accept_forwards_to_priv_channels = false;
8141         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(no_announce_cfg), None]);
8142         let persister: test_utils::TestPersister;
8143         let new_chain_monitor: test_utils::TestChainMonitor;
8144         let nodes_1_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
8145         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
8146
8147         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000, InitFeatures::known(), InitFeatures::known());
8148
8149         // Note that the create_*_chan functions in utils require announcement_signatures, which we do
8150         // not send for private channels.
8151         nodes[1].node.create_channel(nodes[2].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
8152         let open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[2].node.get_our_node_id());
8153         nodes[2].node.handle_open_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &open_channel);
8154         let accept_channel = get_event_msg!(nodes[2], MessageSendEvent::SendAcceptChannel, nodes[1].node.get_our_node_id());
8155         nodes[1].node.handle_accept_channel(&nodes[2].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
8156
8157         let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[1], 1_000_000, 42);
8158         nodes[1].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
8159         nodes[2].node.handle_funding_created(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingCreated, nodes[2].node.get_our_node_id()));
8160         check_added_monitors!(nodes[2], 1);
8161
8162         nodes[1].node.handle_funding_signed(&nodes[2].node.get_our_node_id(), &get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, nodes[1].node.get_our_node_id()));
8163         check_added_monitors!(nodes[1], 1);
8164
8165         let conf_height = core::cmp::max(nodes[1].best_block_info().1 + 1, nodes[2].best_block_info().1 + 1);
8166         confirm_transaction_at(&nodes[1], &tx, conf_height);
8167         connect_blocks(&nodes[1], CHAN_CONFIRM_DEPTH - 1);
8168         confirm_transaction_at(&nodes[2], &tx, conf_height);
8169         connect_blocks(&nodes[2], CHAN_CONFIRM_DEPTH - 1);
8170         let as_funding_locked = get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[2].node.get_our_node_id());
8171         nodes[1].node.handle_funding_locked(&nodes[2].node.get_our_node_id(), &get_event_msg!(nodes[2], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id()));
8172         get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[2].node.get_our_node_id());
8173         nodes[2].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &as_funding_locked);
8174         get_event_msg!(nodes[2], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
8175
8176         assert!(nodes[0].node.list_usable_channels()[0].is_public);
8177         assert_eq!(nodes[1].node.list_usable_channels().len(), 2);
8178         assert!(!nodes[2].node.list_usable_channels()[0].is_public);
8179
8180         // We should always be able to forward through nodes[1] as long as it's going out through a public
8181         // channel:
8182         send_payment(&nodes[2], &[&nodes[1], &nodes[0]], 10_000);
8183
8184         // ... however, if we send to nodes[2], we will have to pass the private channel from nodes[1]
8185         // to nodes[2], which should be rejected:
8186         let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]);
8187         let route = get_route(&nodes[0].node.get_our_node_id(),
8188                 &nodes[0].net_graph_msg_handler.network_graph.read().unwrap(),
8189                 &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None,
8190                 &[&RouteHint(vec![RouteHintHop {
8191                         src_node_id: nodes[1].node.get_our_node_id(),
8192                         short_channel_id: nodes[2].node.list_channels()[0].short_channel_id.unwrap(),
8193                         fees: RoutingFees { base_msat: 1000, proportional_millionths: 0 },
8194                         cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA,
8195                         htlc_minimum_msat: None,
8196                         htlc_maximum_msat: None,
8197                 }])], 10_000, TEST_FINAL_CLTV, nodes[0].logger).unwrap();
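        // The nodes[1] <-> nodes[2] channel is unannounced, so the sender can only learn of it via the
        // route hint above rather than from the public network graph.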
8198
8199         nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
8200         check_added_monitors!(nodes[0], 1);
8201         let payment_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
8202         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
8203         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true);
8204
8205         let htlc_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
8206         assert!(htlc_fail_updates.update_add_htlcs.is_empty());
8207         assert_eq!(htlc_fail_updates.update_fail_htlcs.len(), 1);
8208         assert!(htlc_fail_updates.update_fail_malformed_htlcs.is_empty());
8209         assert!(htlc_fail_updates.update_fee.is_none());
8210
8211         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_fail_updates.update_fail_htlcs[0]);
8212         commitment_signed_dance!(nodes[0], nodes[1], htlc_fail_updates.commitment_signed, true, true);
8213         expect_payment_failed!(nodes[0], our_payment_hash, false);
8214         expect_payment_failure_chan_update!(nodes[0], nodes[2].node.list_channels()[0].short_channel_id.unwrap(), true);
8215
8216         // Now disconnect nodes[1] from its peers and restart with accept_forwards_to_priv_channels set
8217         // to true. Sadly there is currently no way to change it at runtime.
8218
8219         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
8220         nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
8221
8222         let nodes_1_serialized = nodes[1].node.encode();
8223         let mut monitor_a_serialized = test_utils::TestVecWriter(Vec::new());
8224         let mut monitor_b_serialized = test_utils::TestVecWriter(Vec::new());
8225         {
8226                 let mons = nodes[1].chain_monitor.chain_monitor.monitors.read().unwrap();
8227                 let mut mon_iter = mons.iter();
8228                 mon_iter.next().unwrap().1.write(&mut monitor_a_serialized).unwrap();
8229                 mon_iter.next().unwrap().1.write(&mut monitor_b_serialized).unwrap();
8230         }
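        // Standard restart pattern: serialize nodes[1]'s ChannelManager and both ChannelMonitors, then
        // re-read them below against a fresh TestChainMonitor, with accept_forwards_to_priv_channels
        // flipped to true in the deserialization config.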
8231
8232         persister = test_utils::TestPersister::new();
8233         let keys_manager = &chanmon_cfgs[1].keys_manager;
8234         new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[1].chain_source), nodes[1].tx_broadcaster.clone(), nodes[1].logger, node_cfgs[1].fee_estimator, &persister, keys_manager);
8235         nodes[1].chain_monitor = &new_chain_monitor;
8236
8237         let mut monitor_a_read = &monitor_a_serialized.0[..];
8238         let mut monitor_b_read = &monitor_b_serialized.0[..];
8239         let (_, mut monitor_a) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut monitor_a_read, keys_manager).unwrap();
8240         let (_, mut monitor_b) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut monitor_b_read, keys_manager).unwrap();
8241         assert!(monitor_a_read.is_empty());
8242         assert!(monitor_b_read.is_empty());
8243
8244         no_announce_cfg.accept_forwards_to_priv_channels = true;
8245
8246         let mut nodes_1_read = &nodes_1_serialized[..];
8247         let (_, nodes_1_deserialized_tmp) = {
8248                 let mut channel_monitors = HashMap::new();
8249                 channel_monitors.insert(monitor_a.get_funding_txo().0, &mut monitor_a);
8250                 channel_monitors.insert(monitor_b.get_funding_txo().0, &mut monitor_b);
8251                 <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_1_read, ChannelManagerReadArgs {
8252                         default_config: no_announce_cfg,
8253                         keys_manager,
8254                         fee_estimator: node_cfgs[1].fee_estimator,
8255                         chain_monitor: nodes[1].chain_monitor,
8256                         tx_broadcaster: nodes[1].tx_broadcaster.clone(),
8257                         logger: nodes[1].logger,
8258                         channel_monitors,
8259                 }).unwrap()
8260         };
8261         assert!(nodes_1_read.is_empty());
8262         nodes_1_deserialized = nodes_1_deserialized_tmp;
8263
8264         assert!(nodes[1].chain_monitor.watch_channel(monitor_a.get_funding_txo().0, monitor_a).is_ok());
8265         assert!(nodes[1].chain_monitor.watch_channel(monitor_b.get_funding_txo().0, monitor_b).is_ok());
8266         check_added_monitors!(nodes[1], 2);
8267         nodes[1].node = &nodes_1_deserialized;
8268
8269         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
8270         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
8271         let as_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
8272         let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
8273         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
8274         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
8275         get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
8276         get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
8277
8278         nodes[1].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
8279         nodes[2].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
8280         let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[2].node.get_our_node_id());
8281         let cs_reestablish = get_event_msg!(nodes[2], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
8282         nodes[2].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
8283         nodes[1].node.handle_channel_reestablish(&nodes[2].node.get_our_node_id(), &cs_reestablish);
8284         get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[2].node.get_our_node_id());
8285         get_event_msg!(nodes[2], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
8286
8287         nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
8288         check_added_monitors!(nodes[0], 1);
8289         pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], 10_000, our_payment_hash, our_payment_secret);
8290         claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], our_payment_preimage);
8291 }
8292
8293 #[test]
8294 fn test_bump_penalty_txn_on_revoked_commitment() {
8295         // If penalty txn have feerates too low to get into mempools, RBF-bump them to be sure
8296         // we're able to claim outputs on the revoked commitment transaction before timelock expiration
8297
8298         let chanmon_cfgs = create_chanmon_cfgs(2);
8299         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8300         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8301         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8302
8303         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, InitFeatures::known(), InitFeatures::known());
8304         let logger = test_utils::TestLogger::new();
8305
8306         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
8307         let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
8308         let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 3000000, 30, &logger).unwrap();
8309         send_along_route(&nodes[1], route, &vec!(&nodes[0])[..], 3000000);
8310
8311         let revoked_txn = get_local_commitment_txn!(nodes[0], chan.2);
8312         // Revoked commitment txn with 4 outputs : to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
8313         assert_eq!(revoked_txn[0].output.len(), 4);
8314         assert_eq!(revoked_txn[0].input.len(), 1);
8315         assert_eq!(revoked_txn[0].input[0].previous_output.txid, chan.3.txid());
8316         let revoked_txid = revoked_txn[0].txid();
8317
8318         let mut penalty_sum = 0;
8319         for outp in revoked_txn[0].output.iter() {
8320                 if outp.script_pubkey.is_v0_p2wsh() {
8321                         penalty_sum += outp.value;
8322                 }
8323         }
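        // Only the P2WSH outputs (to_local plus the two HTLC outputs) are claimable via the justice tx
        // here; the counterparty's to_remote output is P2WPKH and is excluded from penalty_sum.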
8324
8325         // Connect blocks to change height_timer range to see if we use right soonest_timelock
8326         let header_114 = connect_blocks(&nodes[1], 14);
8327
8328         // Actually revoke tx by claiming a HTLC
8329         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
8330         let header = BlockHeader { version: 0x20000000, prev_blockhash: header_114, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
8331         connect_block(&nodes[1], &Block { header, txdata: vec![revoked_txn[0].clone()] });
8332         check_added_monitors!(nodes[1], 1);
8333
8334         // One or more justice tx should have been broadcast, check it
8335         let penalty_1;
8336         let feerate_1;
8337         {
8338                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
8339                 assert_eq!(node_txn.len(), 2); // justice tx (broadcasted from ChannelMonitor) + local commitment tx
8340                 assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
8341                 assert_eq!(node_txn[0].output.len(), 1);
8342                 check_spends!(node_txn[0], revoked_txn[0]);
8343                 let fee_1 = penalty_sum - node_txn[0].output[0].value;
8344                 feerate_1 = fee_1 * 1000 / node_txn[0].get_weight() as u64;
8345                 penalty_1 = node_txn[0].txid();
8346                 node_txn.clear();
8347         };
8348
8349         // After exhaustion of height timer, a new bumped justice tx should have been broadcast, check it
8350         connect_blocks(&nodes[1], 15);
8351         let mut penalty_2 = penalty_1;
8352         let mut feerate_2 = 0;
8353         {
8354                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
8355                 assert_eq!(node_txn.len(), 1);
8356                 if node_txn[0].input[0].previous_output.txid == revoked_txid {
8357                         assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
8358                         assert_eq!(node_txn[0].output.len(), 1);
8359                         check_spends!(node_txn[0], revoked_txn[0]);
8360                         penalty_2 = node_txn[0].txid();
8361                         // Verify the new bumped tx is different from the last claiming transaction - we don't want spurious rebroadcasts
8362                         assert_ne!(penalty_2, penalty_1);
8363                         let fee_2 = penalty_sum - node_txn[0].output[0].value;
8364                         feerate_2 = fee_2 * 1000 / node_txn[0].get_weight() as u64;
8365                         // Verify 25% bump heuristic
8366                         assert!(feerate_2 * 100 >= feerate_1 * 125);
8367                         node_txn.clear();
8368                 }
8369         }
8370         assert_ne!(feerate_2, 0);
8371
8372         // After exhaustion of height timer for a 2nd time, a new bumped justice tx should have been broadcast, check it
8373         connect_blocks(&nodes[1], 1);
8374         let penalty_3;
8375         let mut feerate_3 = 0;
8376         {
8377                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
8378                 assert_eq!(node_txn.len(), 1);
8379                 if node_txn[0].input[0].previous_output.txid == revoked_txid {
8380                         assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
8381                         assert_eq!(node_txn[0].output.len(), 1);
8382                         check_spends!(node_txn[0], revoked_txn[0]);
8383                         penalty_3 = node_txn[0].txid();
8384                         // Verify the new bumped tx is different from the last claiming transaction - we don't want spurious rebroadcasts
8385                         assert_ne!(penalty_3, penalty_2);
8386                         let fee_3 = penalty_sum - node_txn[0].output[0].value;
8387                         feerate_3 = fee_3 * 1000 / node_txn[0].get_weight() as u64;
8388                         // Verify 25% bump heuristic
8389                         assert!(feerate_3 * 100 >= feerate_2 * 125);
8390                         node_txn.clear();
8391                 }
8392         }
8393         assert_ne!(feerate_3, 0);
8394
8395         nodes[1].node.get_and_clear_pending_events();
8396         nodes[1].node.get_and_clear_pending_msg_events();
8397 }
8398
8399 #[test]
8400 fn test_bump_penalty_txn_on_revoked_htlcs() {
8401         // If penalty txn have feerates too low to get into mempools, RBF-bump them to be sure
8402         // we're able to claim outputs on revoked HTLC transactions before timelock expiration
8403
8404         let mut chanmon_cfgs = create_chanmon_cfgs(2);
8405         chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
8406         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8407         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8408         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8409
8410         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, InitFeatures::known(), InitFeatures::known());
8411         // Lock HTLC in both directions (using a slightly lower CLTV delay to provide timely RBF bumps)
8412         let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph.read().unwrap(),
8413                 &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 3_000_000, 50, nodes[0].logger).unwrap();
8414         let payment_preimage = send_along_route(&nodes[0], route, &[&nodes[1]], 3_000_000).0;
8415         let route = get_route(&nodes[1].node.get_our_node_id(), &nodes[1].net_graph_msg_handler.network_graph.read().unwrap(),
8416                 &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 3_000_000, 50, nodes[0].logger).unwrap();
8417         send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000);
8418
8419         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
8420         assert_eq!(revoked_local_txn[0].input.len(), 1);
8421         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
8422
8423         // Revoke local commitment tx
8424         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
8425
8426         let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
8427         // B will generate both revoked HTLC-timeout/HTLC-preimage txn from revoked commitment tx
8428         connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone()] });
8429         check_closed_broadcast!(nodes[1], true);
8430         check_added_monitors!(nodes[1], 1);
8431         connect_blocks(&nodes[1], 49); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above)
8432
8433         let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
8434         assert_eq!(revoked_htlc_txn.len(), 3);
8435         check_spends!(revoked_htlc_txn[1], chan.3);
8436
8437         assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
8438         assert_eq!(revoked_htlc_txn[0].input.len(), 1);
8439         check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]);
8440
8441         assert_eq!(revoked_htlc_txn[2].input.len(), 1);
8442         assert_eq!(revoked_htlc_txn[2].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
8443         assert_eq!(revoked_htlc_txn[2].output.len(), 1);
8444         check_spends!(revoked_htlc_txn[2], revoked_local_txn[0]);
8445
8446         // Broadcast set of revoked txn on A
8447         let hash_128 = connect_blocks(&nodes[0], 40);
8448         let header_11 = BlockHeader { version: 0x20000000, prev_blockhash: hash_128, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
8449         connect_block(&nodes[0], &Block { header: header_11, txdata: vec![revoked_local_txn[0].clone()] });
8450         let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_11.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
8451         connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[2].clone()] });
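        // Node A now sees both the revoked commitment and the revoked HTLC txn confirmed, so it should
        // claim the revoked commitment outputs as well as the outputs of the revoked HTLC txn themselves.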
8452         expect_pending_htlcs_forwardable_ignore!(nodes[0]);
8453         let first;
8454         let feerate_1;
8455         let penalty_txn;
8456         {
8457                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
8458                 assert_eq!(node_txn.len(), 5); // 3 penalty txn on revoked commitment tx + A commitment tx + 1 penalty txn on revoked HTLC txn
8459                 // Verify claim tx are spending revoked HTLC txn
8460
8461                 // node_txn 0-2 each spend a separate revoked output from revoked_local_txn[0]
8462                 // Note that node_txn[0] and node_txn[1] are bogus - they double spend the revoked_htlc_txn
8463                 // which are included in the same block (they are broadcasted because we scan the
8464                 // transactions linearly and generate claims as we go, they likely should be removed in the
8465                 // future).
8466                 assert_eq!(node_txn[0].input.len(), 1);
8467                 check_spends!(node_txn[0], revoked_local_txn[0]);
8468                 assert_eq!(node_txn[1].input.len(), 1);
8469                 check_spends!(node_txn[1], revoked_local_txn[0]);
8470                 assert_eq!(node_txn[2].input.len(), 1);
8471                 check_spends!(node_txn[2], revoked_local_txn[0]);
8472
8473                 // Each of the three justice transactions claim a separate (single) output of the three
8474                 // available, which we check here:
8475                 assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
8476                 assert_ne!(node_txn[0].input[0].previous_output, node_txn[2].input[0].previous_output);
8477                 assert_ne!(node_txn[1].input[0].previous_output, node_txn[2].input[0].previous_output);
8478
8479                 assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
8480                 assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[2].input[0].previous_output);
8481
8482                 // node_txn[3] is the local commitment tx broadcast just because (and somewhat in case of
8483                 // reorgs, though it's not clear it's ever worth broadcasting conflicting txn like this when
8484                 // a remote commitment tx has already been confirmed).
8485                 check_spends!(node_txn[3], chan.3);
8486
8487                 // node_txn[4] spends the revoked outputs from the revoked_htlc_txn (which only have one
8488                 // output, checked above).
8489                 assert_eq!(node_txn[4].input.len(), 2);
8490                 assert_eq!(node_txn[4].output.len(), 1);
8491                 check_spends!(node_txn[4], revoked_htlc_txn[0], revoked_htlc_txn[2]);
8492
8493                 first = node_txn[4].txid();
8494                 // Store both feerates for later comparison
8495                 let fee_1 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[2].output[0].value - node_txn[4].output[0].value;
8496                 feerate_1 = fee_1 * 1000 / node_txn[4].get_weight() as u64;
8497                 penalty_txn = vec![node_txn[2].clone()];
8498                 node_txn.clear();
8499         }
8500
8501         // Connect one more block to see if bumped penalty are issued for HTLC txn
8502         let header_130 = BlockHeader { version: 0x20000000, prev_blockhash: header_129.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
8503         connect_block(&nodes[0], &Block { header: header_130, txdata: penalty_txn });
8504         let header_131 = BlockHeader { version: 0x20000000, prev_blockhash: header_130.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
8505         connect_block(&nodes[0], &Block { header: header_131, txdata: Vec::new() });
8506         {
8507                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
8508                 assert_eq!(node_txn.len(), 2); // 2 bumped penalty txn on revoked commitment tx
8509
8510                 check_spends!(node_txn[0], revoked_local_txn[0]);
8511                 check_spends!(node_txn[1], revoked_local_txn[0]);
8512                 // Note that these are both bogus - they spend outputs already claimed in block 129:
8513                 if node_txn[0].input[0].previous_output == revoked_htlc_txn[0].input[0].previous_output  {
8514                         assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[2].input[0].previous_output);
8515                 } else {
8516                         assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[2].input[0].previous_output);
8517                         assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
8518                 }
8519
8520                 node_txn.clear();
8521         };
8522
8523         // Few more blocks to confirm penalty txn
8524         connect_blocks(&nodes[0], 4);
8525         assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
8526         let header_144 = connect_blocks(&nodes[0], 9);
8527         let node_txn = {
8528                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
8529                 assert_eq!(node_txn.len(), 1);
8530
8531                 assert_eq!(node_txn[0].input.len(), 2);
8532                 check_spends!(node_txn[0], revoked_htlc_txn[0], revoked_htlc_txn[2]);
8533                 // Verify bumped tx is different and 25% bump heuristic
8534                 assert_ne!(first, node_txn[0].txid());
8535                 let fee_2 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[2].output[0].value - node_txn[0].output[0].value;
8536                 let feerate_2 = fee_2 * 1000 / node_txn[0].get_weight() as u64;
8537                 assert!(feerate_2 * 100 > feerate_1 * 125);
8538                 let txn = vec![node_txn[0].clone()];
8539                 node_txn.clear();
8540                 txn
8541         };
8542         // Broadcast claim txn and confirm blocks to avoid further bumps on these outputs
8543         let header_145 = BlockHeader { version: 0x20000000, prev_blockhash: header_144, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
8544         connect_block(&nodes[0], &Block { header: header_145, txdata: node_txn });
8545         connect_blocks(&nodes[0], 20);
8546         {
8547                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
8548                 // We verify that no new transaction has been broadcast. We were previously buggy here
8549                 // (see #411): we didn't track the remote HTLC outputs for monitoring, so we would never
8550                 // see a justice tx spend them and kept generating bumped justice txn forever instead of
8551                 // cleaning up after confirmation plus ANTI_REORG_SAFE_DELAY blocks. Here we check that once
8552                 // a claiming transaction spends the revoked HTLC outputs, the claim request is removed as
8553                 // expected and bumped justice generation dries up.
8554                 assert_eq!(node_txn.len(), 0);
8555                 node_txn.clear();
8556         }
8557         check_closed_broadcast!(nodes[0], true);
8558         check_added_monitors!(nodes[0], 1);
8559 }
8560
8561 #[test]
8562 fn test_bump_penalty_txn_on_remote_commitment() {
8563         // If claim txn have feerates too low to get into mempools, RBF-bump them to be sure
8564         // we're able to claim outputs on the remote commitment transaction before timelock expiration
8565
8566         // Create 2 HTLCs
8567         // Provide preimage for one
8568         // Check aggregation
8569
8570         let chanmon_cfgs = create_chanmon_cfgs(2);
8571         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8572         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8573         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8574
8575         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, InitFeatures::known(), InitFeatures::known());
8576         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
8577         route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0;
8578
8579         // Remote commitment txn with 4 outputs : to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
8580         let remote_txn = get_local_commitment_txn!(nodes[0], chan.2);
8581         assert_eq!(remote_txn[0].output.len(), 4);
8582         assert_eq!(remote_txn[0].input.len(), 1);
8583         assert_eq!(remote_txn[0].input[0].previous_output.txid, chan.3.txid());
8584
8585         // Claim a HTLC without revocation (provide B monitor with preimage)
8586         nodes[1].node.claim_funds(payment_preimage);
8587         mine_transaction(&nodes[1], &remote_txn[0]);
8588         check_added_monitors!(nodes[1], 2);
8589         connect_blocks(&nodes[1], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
8590
8591         // One or more claim tx should have been broadcast, check it
8592         let timeout;
8593         let preimage;
8594         let preimage_bump;
8595         let feerate_timeout;
8596         let feerate_preimage;
8597         {
8598                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
8599                 // 8 transactions including:
8600                 // 1*2 ChannelManager local broadcasts of commitment + HTLC-Success
8601                 // 1*3 ChannelManager local broadcasts of commitment + HTLC-Success + HTLC-Timeout
8602                 // 2 * HTLC-Success (one RBF bump we'll check later)
8603                 // 1 * HTLC-Timeout
8604                 assert_eq!(node_txn.len(), 8);
8605                 assert_eq!(node_txn[0].input.len(), 1);
8606                 assert_eq!(node_txn[6].input.len(), 1);
8607                 check_spends!(node_txn[0], remote_txn[0]);
8608                 check_spends!(node_txn[6], remote_txn[0]);
8609                 assert_eq!(node_txn[0].input[0].previous_output, node_txn[3].input[0].previous_output);
8610                 preimage_bump = node_txn[3].clone();
8611
8612                 check_spends!(node_txn[1], chan.3);
8613                 check_spends!(node_txn[2], node_txn[1]);
8614                 assert_eq!(node_txn[1], node_txn[4]);
8615                 assert_eq!(node_txn[2], node_txn[5]);
8616
8617                 timeout = node_txn[6].txid();
8618                 let index = node_txn[6].input[0].previous_output.vout;
8619                 let fee = remote_txn[0].output[index as usize].value - node_txn[6].output[0].value;
8620                 feerate_timeout = fee * 1000 / node_txn[6].get_weight() as u64;
8621
8622                 preimage = node_txn[0].txid();
8623                 let index = node_txn[0].input[0].previous_output.vout;
8624                 let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value;
8625                 feerate_preimage = fee * 1000 / node_txn[0].get_weight() as u64;
8626
8627                 node_txn.clear();
8628         };
8629         assert_ne!(feerate_timeout, 0);
8630         assert_ne!(feerate_preimage, 0);
8631
8632         // After exhaustion of height timer, new bumped claim txn should have been broadcast, check it
8633         connect_blocks(&nodes[1], 15);
8634         {
8635                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
8636                 assert_eq!(node_txn.len(), 1);
8637                 assert_eq!(node_txn[0].input.len(), 1);
8638                 assert_eq!(preimage_bump.input.len(), 1);
8639                 check_spends!(node_txn[0], remote_txn[0]);
8640                 check_spends!(preimage_bump, remote_txn[0]);
8641
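                // Each bumped claim must pay a feerate at least 25% higher than its predecessor
                // (checked as new_feerate * 100 > old_feerate * 125).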
8642                 let index = preimage_bump.input[0].previous_output.vout;
8643                 let fee = remote_txn[0].output[index as usize].value - preimage_bump.output[0].value;
8644                 let new_feerate = fee * 1000 / preimage_bump.get_weight() as u64;
8645                 assert!(new_feerate * 100 > feerate_timeout * 125);
8646                 assert_ne!(timeout, preimage_bump.txid());
8647
8648                 let index = node_txn[0].input[0].previous_output.vout;
8649                 let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value;
8650                 let new_feerate = fee * 1000 / node_txn[0].get_weight() as u64;
8651                 assert!(new_feerate * 100 > feerate_preimage * 125);
8652                 assert_ne!(preimage, node_txn[0].txid());
8653
8654                 node_txn.clear();
8655         }
8656
8657         nodes[1].node.get_and_clear_pending_events();
8658         nodes[1].node.get_and_clear_pending_msg_events();
8659 }
8660
8661 #[test]
8662 fn test_counterparty_raa_skip_no_crash() {
8663         // Previously, if our counterparty sent two RAAs in a row without us having provided a
8664         // commitment transaction, we would have happily carried on and provided them the next
8665         // commitment transaction based on one RAA forward. This would probably eventually have led to
8666         // channel closure, but it would not have resulted in funds loss. Still, our
8667         // EnforcingSigner would have panicked as it doesn't like jumps into the future. Here, we
8668         // simply check that the channel is closed in response to such an RAA, but don't check whether
8669         // we decide to punish our counterparty for revoking their funds (as we don't currently
8670         // implement that).
8671         let chanmon_cfgs = create_chanmon_cfgs(2);
8672         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8673         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8674         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8675         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
8676
8677         let mut guard = nodes[0].node.channel_state.lock().unwrap();
8678         let keys = &guard.by_id.get_mut(&channel_id).unwrap().get_signer();
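        // Commitment numbers start at 2^48 - 1 and count down as new commitments are exchanged.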
8679         const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
8680         let per_commitment_secret = keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER);
8681         // Must revoke without gaps
8682         keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1);
8683         let next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(),
8684                 &SecretKey::from_slice(&keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap());
8685
8686         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(),
8687                 &msgs::RevokeAndACK { channel_id, per_commitment_secret, next_per_commitment_point });
8688         assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack");
8689         check_added_monitors!(nodes[1], 1);
8690 }
8691
8692 #[test]
8693 fn test_bump_txn_sanitize_tracking_maps() {
8694         // Sanitizing pending_claim_requests and claimable_outpoints used to be buggy;
8695         // verify we clean them right after expiration of ANTI_REORG_DELAY.
8696
8697         let chanmon_cfgs = create_chanmon_cfgs(2);
8698         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8699         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8700         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8701
8702         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, InitFeatures::known(), InitFeatures::known());
8703         // Lock HTLC in both directions
8704         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
8705         route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000).0;
8706
8707         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
8708         assert_eq!(revoked_local_txn[0].input.len(), 1);
8709         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
8710
8711         // Revoke local commitment tx
8712         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
8713
8714         // Broadcast set of revoked txn on A
8715         connect_blocks(&nodes[0], TEST_FINAL_CLTV + 2 - CHAN_CONFIRM_DEPTH);
8716         expect_pending_htlcs_forwardable_ignore!(nodes[0]);
8717         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
8718
8719         mine_transaction(&nodes[0], &revoked_local_txn[0]);
8720         check_closed_broadcast!(nodes[0], true);
8721         check_added_monitors!(nodes[0], 1);
8722         let penalty_txn = {
8723                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
8724                 assert_eq!(node_txn.len(), 4); //ChannelMonitor: justice txn * 3, ChannelManager: local commitment tx
8725                 check_spends!(node_txn[0], revoked_local_txn[0]);
8726                 check_spends!(node_txn[1], revoked_local_txn[0]);
8727                 check_spends!(node_txn[2], revoked_local_txn[0]);
8728                 let penalty_txn = vec![node_txn[0].clone(), node_txn[1].clone(), node_txn[2].clone()];
8729                 node_txn.clear();
8730                 penalty_txn
8731         };
8732         let header_130 = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
8733         connect_block(&nodes[0], &Block { header: header_130, txdata: penalty_txn });
8734         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
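        // Once the penalty transactions have ANTI_REORG_DELAY confirmations, the monitor's
        // pending_claim_requests and claimable_outpoints maps should have been emptied.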
8735         {
8736                 let monitors = nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap();
8737                 if let Some(monitor) = monitors.get(&OutPoint { txid: chan.3.txid(), index: 0 }) {
8738                         assert!(monitor.inner.lock().unwrap().onchain_tx_handler.pending_claim_requests.is_empty());
8739                         assert!(monitor.inner.lock().unwrap().onchain_tx_handler.claimable_outpoints.is_empty());
8740                 }
8741         }
8742 }
8743
8744 #[test]
8745 fn test_override_channel_config() {
8746         let chanmon_cfgs = create_chanmon_cfgs(2);
8747         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8748         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8749         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8750
8751         // Node0 initiates a channel to node1 using the override config.
8752         let mut override_config = UserConfig::default();
8753         override_config.own_channel_config.our_to_self_delay = 200;
8754
8755         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, Some(override_config)).unwrap();
8756
8757         // Assert the channel created by node0 is using the override config.
8758         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8759         assert_eq!(res.channel_flags, 0);
8760         assert_eq!(res.to_self_delay, 200);
8761 }
8762
8763 #[test]
8764 fn test_override_0msat_htlc_minimum() {
8765         let mut zero_config = UserConfig::default();
8766         zero_config.own_channel_config.our_htlc_minimum_msat = 0;
8767         let chanmon_cfgs = create_chanmon_cfgs(2);
8768         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8769         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(zero_config.clone())]);
8770         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8771
8772         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, Some(zero_config)).unwrap();
8773         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
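        // Even with a configured htlc_minimum_msat of 0, the open_channel message should advertise a
        // minimum of 1 msat.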
8774         assert_eq!(res.htlc_minimum_msat, 1);
8775
8776         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &res);
8777         let res = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
8778         assert_eq!(res.htlc_minimum_msat, 1);
8779 }
8780
8781 #[test]
8782 fn test_simple_mpp() {
8783         // Simple test of sending a multi-path payment.
8784         let chanmon_cfgs = create_chanmon_cfgs(4);
8785         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
8786         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
8787         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
8788
8789         let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
8790         let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
8791         let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
8792         let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
8793         let logger = test_utils::TestLogger::new();
8794
8795         let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[3]);
8796         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
8797         let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[3].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
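        // Duplicate the single path returned by the router and rewrite the two copies so the payment
        // is split across the 0 -> 1 -> 3 and 0 -> 2 -> 3 routes.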
8798         let path = route.paths[0].clone();
8799         route.paths.push(path);
8800         route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
8801         route.paths[0][0].short_channel_id = chan_1_id;
8802         route.paths[0][1].short_channel_id = chan_3_id;
8803         route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
8804         route.paths[1][0].short_channel_id = chan_2_id;
8805         route.paths[1][1].short_channel_id = chan_4_id;
8806         send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 200_000, payment_hash, payment_secret);
8807         claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
8808 }
8809
8810 #[test]
8811 fn test_preimage_storage() {
8812         // Simple test of payment preimage storage allowing no client-side storage to claim payments
8813         let chanmon_cfgs = create_chanmon_cfgs(2);
8814         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8815         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8816         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8817
8818         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
8819
8820         {
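                // create_inbound_payment generates a payment hash/secret pair and keeps the preimage
                // inside the ChannelManager, so no client-side storage is needed to later claim the payment.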
8821                 let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 7200, 42);
8822
8823                 let logger = test_utils::TestLogger::new();
8824                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
8825                 let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100_000, TEST_FINAL_CLTV, &logger).unwrap();
8826                 nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
8827                 check_added_monitors!(nodes[0], 1);
8828                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8829                 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
8830                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
8831                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
8832         }
8833         // Note that after leaving the above scope we have no knowledge of any arguments or return
8834         // values from previous calls.
8835         expect_pending_htlcs_forwardable!(nodes[1]);
8836         let events = nodes[1].node.get_and_clear_pending_events();
8837         assert_eq!(events.len(), 1);
8838         match events[0] {
8839                 Event::PaymentReceived { ref purpose, .. } => {
8840                         match &purpose {
8841                                 PaymentPurpose::InvoicePayment { payment_preimage, user_payment_id, .. } => {
8842                                         assert_eq!(*user_payment_id, 42);
8843                                         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage.unwrap());
8844                                 },
8845                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
8846                         }
8847                 },
8848                 _ => panic!("Unexpected event"),
8849         }
8850 }
8851
8852 #[test]
8853 fn test_secret_timeout() {
8854         // Simple test of payment secret storage time outs
8855         let chanmon_cfgs = create_chanmon_cfgs(2);
8856         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8857         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8858         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8859
8860         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
8861
8862         let (payment_hash, payment_secret_1) = nodes[1].node.create_inbound_payment(Some(100_000), 2, 0);
8863
8864         // We should fail to register the same payment hash twice, at least until we've connected a
8865         // block with time 7200 + CHAN_CONFIRM_DEPTH + 1.
8866         if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash(payment_hash, Some(100_000), 2, 0) {
8867                 assert_eq!(err, "Duplicate payment hash");
8868         } else { panic!(); }
8869         let mut block = {
8870                 let node_1_blocks = nodes[1].blocks.lock().unwrap();
8871                 Block {
8872                         header: BlockHeader {
8873                                 version: 0x20000000,
8874                                 prev_blockhash: node_1_blocks.last().unwrap().0.block_hash(),
8875                                 merkle_root: Default::default(),
8876                                 time: node_1_blocks.len() as u32 + 7200, bits: 42, nonce: 42 },
8877                         txdata: vec![],
8878                 }
8879         };
8880         connect_block(&nodes[1], &block);
8881         if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash(payment_hash, Some(100_000), 2, 0) {
8882                 assert_eq!(err, "Duplicate payment hash");
8883         } else { panic!(); }
8884
8885         // If we then connect the second block, we should be able to register the same payment hash
8886         // again with a different user_payment_id (this time getting a new payment secret).
8887         block.header.prev_blockhash = block.header.block_hash();
8888         block.header.time += 1;
8889         connect_block(&nodes[1], &block);
8890         let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(payment_hash, Some(100_000), 2, 42).unwrap();
8891         assert_ne!(payment_secret_1, our_payment_secret);
8892
8893         {
8894                 let logger = test_utils::TestLogger::new();
8895                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
8896                 let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100_000, TEST_FINAL_CLTV, &logger).unwrap();
8897                 nodes[0].node.send_payment(&route, payment_hash, &Some(our_payment_secret)).unwrap();
8898                 check_added_monitors!(nodes[0], 1);
8899                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8900                 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
8901                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
8902                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
8903         }
8904         // Note that after leaving the above scope we have no knowledge of any arguments or return
8905         // values from previous calls.
8906         expect_pending_htlcs_forwardable!(nodes[1]);
8907         let events = nodes[1].node.get_and_clear_pending_events();
8908         assert_eq!(events.len(), 1);
8909         match events[0] {
8910                 Event::PaymentReceived { purpose: PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, user_payment_id }, .. } => {
8911                         assert!(payment_preimage.is_none());
8912                         assert_eq!(user_payment_id, 42);
8913                         assert_eq!(payment_secret, our_payment_secret);
8914                         // We don't actually have the payment preimage with which to claim this payment!
8915                 },
8916                 _ => panic!("Unexpected event"),
8917         }
8918 }
8919
8920 #[test]
8921 fn test_bad_secret_hash() {
8922         // Simple test of unregistered payment hash/invalid payment secret handling
8923         let chanmon_cfgs = create_chanmon_cfgs(2);
8924         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8925         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8926         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8927
8928         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
8929
8930         let random_payment_hash = PaymentHash([42; 32]);
8931         let random_payment_secret = PaymentSecret([43; 32]);
8932         let (our_payment_hash, our_payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 2, 0);
8933
8934         let logger = test_utils::TestLogger::new();
8935         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
8936         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100_000, TEST_FINAL_CLTV, &logger).unwrap();
8937
8938         // All the below cases should end up being handled exactly identically, so we macro the
8939         // resulting events.
8940         macro_rules! handle_unknown_invalid_payment_data {
8941                 () => {
8942                         check_added_monitors!(nodes[0], 1);
8943                         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8944                         let payment_event = SendEvent::from_event(events.pop().unwrap());
8945                         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
8946                         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
8947
8948                         // We have to forward pending HTLCs once to process the receipt of the HTLC and then
8949                         // again to process the pending backwards-failure of the HTLC
8950                         expect_pending_htlcs_forwardable!(nodes[1]);
8951                         expect_pending_htlcs_forwardable!(nodes[1]);
8952                         check_added_monitors!(nodes[1], 1);
8953
8954                         // We should fail the payment back
8955                         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
8956                         match events.pop().unwrap() {
8957                                 MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. } } => {
8958                                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
8959                                         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
8960                                 },
8961                                 _ => panic!("Unexpected event"),
8962                         }
8963                 }
8964         }
8965
8966         let expected_error_code = 0x4000|15; // incorrect_or_unknown_payment_details
8967         // Error data is the HTLC value (100,000) and current block height
8968         let expected_error_data = [0, 0, 0, 0, 0, 1, 0x86, 0xa0, 0, 0, 0, CHAN_CONFIRM_DEPTH as u8];
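        // (The first eight bytes are the big-endian amount, 100_000 msat = 0x186a0; the last four are
        // the block height.)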
8969
8970         // Send a payment with the right payment hash but the wrong payment secret
8971         nodes[0].node.send_payment(&route, our_payment_hash, &Some(random_payment_secret)).unwrap();
8972         handle_unknown_invalid_payment_data!();
8973         expect_payment_failed!(nodes[0], our_payment_hash, true, expected_error_code, expected_error_data);
8974
8975         // Send a payment with a random payment hash, but the right payment secret
8976         nodes[0].node.send_payment(&route, random_payment_hash, &Some(our_payment_secret)).unwrap();
8977         handle_unknown_invalid_payment_data!();
8978         expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
8979
8980         // Send a payment with a random payment hash and random payment secret
8981         nodes[0].node.send_payment(&route, random_payment_hash, &Some(random_payment_secret)).unwrap();
8982         handle_unknown_invalid_payment_data!();
8983         expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
8984 }
8985
8986 #[test]
8987 fn test_update_err_monitor_lockdown() {
8988         // Our monitor will lock updates of the local commitment transaction if a broadcast condition
8989         // has been fulfilled (either a force-close from Channel or a block height requiring an HTLC-
8990         // timeout). Trying to update the monitor after lockdown should return a ChannelMonitorUpdateErr.
8991         //
8992         // This scenario may happen in a watchtower setup, where the watchtower processes a block height
8993         // triggering a timeout while a slow-block-processing ChannelManager receives a locally signed
8994         // commitment at the same time.
8995
8996         let chanmon_cfgs = create_chanmon_cfgs(2);
8997         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8998         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8999         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9000
9001         // Create some initial channel
9002         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
9003         let outpoint = OutPoint { txid: chan_1.3.txid(), index: 0 };
9004
9005         // Rebalance the network to generate HTLCs in both directions
9006         send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
9007
9008         // Route a HTLC from node 0 to node 1 (but don't settle)
9009         let preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
9010
9011         // Copy node 0's ChainMonitor to simulate a watchtower and advance its block height until its ChannelMonitor times out the HTLC onchain
9012         let chain_source = test_utils::TestChainSource::new(Network::Testnet);
9013         let logger = test_utils::TestLogger::with_id(format!("node {}", 0));
9014         let persister = test_utils::TestPersister::new();
9015         let watchtower = {
9016                 let monitors = nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap();
9017                 let monitor = monitors.get(&outpoint).unwrap();
9018                 let mut w = test_utils::TestVecWriter(Vec::new());
9019                 monitor.write(&mut w).unwrap();
9020                 let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
9021                                 &mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
9022                 assert!(new_monitor == *monitor);
9023                 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
9024                 assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
9025                 watchtower
9026         };
9027         let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
9028         // Make the tx_broadcaster aware of enough blocks that it doesn't think we're violating
9029         // transaction lock time requirements here.
9030         chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize(200, (header, 0));
9031         watchtower.chain_monitor.block_connected(&Block { header, txdata: vec![] }, 200);
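        // Height 200 is past the HTLC's expiry from the watchtower's point of view, so its copy of the
        // ChannelMonitor should now have broadcast a commitment and locked out further updates.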
9032
9033         // Try to update ChannelMonitor
9034         assert!(nodes[1].node.claim_funds(preimage));
9035         check_added_monitors!(nodes[1], 1);
9036         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
9037         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
9038         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
9039         if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2) {
9040                 if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
9041                         if let Err(_) =  watchtower.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
9042                         if let Ok(_) = nodes[0].chain_monitor.update_channel(outpoint, update) {} else { assert!(false); }
9043                 } else { assert!(false); }
9044         } else { assert!(false); };
9045         // Our local monitor is in-sync and hasn't yet processed the timeout
9046         check_added_monitors!(nodes[0], 1);
9047         let events = nodes[0].node.get_and_clear_pending_events();
9048         assert_eq!(events.len(), 1);
9049 }
9050
9051 #[test]
9052 fn test_concurrent_monitor_claim() {
9053         // Watchtower Alice receives a block and broadcasts state N. The channel then advances to state
9054         // N+1, which is sent to both watchtowers: Bob accepts it while Alice rejects it. Bob then
9055         // receives a block and broadcasts the latest state N+1. Since Bob has already broadcast it,
9056         // state N+1 confirms, and Alice claims the HTLC output from state N+1.
9057
9058         let chanmon_cfgs = create_chanmon_cfgs(2);
9059         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9060         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9061         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9062
9063         // Create some initial channel
9064         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
9065         let outpoint = OutPoint { txid: chan_1.3.txid(), index: 0 };
9066
9067         // Rebalance the network to generate HTLCs in both directions
9068         send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
9069
9070         // Route a HTLC from node 0 to node 1 (but don't settle)
9071         route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
9072
9073         // Copy node 0's ChainMonitor to simulate watchtower Alice and advance her block height until her ChannelMonitor times out the HTLC onchain
9074         let chain_source = test_utils::TestChainSource::new(Network::Testnet);
9075         let logger = test_utils::TestLogger::with_id(format!("node {}", "Alice"));
9076         let persister = test_utils::TestPersister::new();
9077         let watchtower_alice = {
9078                 let monitors = nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap();
9079                 let monitor = monitors.get(&outpoint).unwrap();
9080                 let mut w = test_utils::TestVecWriter(Vec::new());
9081                 monitor.write(&mut w).unwrap();
9082                 let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
9083                                 &mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
9084                 assert!(new_monitor == *monitor);
9085                 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
9086                 assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
9087                 watchtower
9088         };
9089         let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
9090         // Make the tx_broadcaster aware of enough blocks that it doesn't think we're violating
9091         // transaction lock time requirements here.
9092         chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize((CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS) as usize, (header, 0));
9093         watchtower_alice.chain_monitor.block_connected(&Block { header, txdata: vec![] }, CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
9094
9095         // Watchtower Alice should have broadcast a commitment/HTLC-timeout
9096         {
9097                 let mut txn = chanmon_cfgs[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
9098                 assert_eq!(txn.len(), 2);
9099                 txn.clear();
9100         }
9101
9102         // Copy ChainMonitor to simulate watchtower Bob and make it receive a commitment update first.
9103         let chain_source = test_utils::TestChainSource::new(Network::Testnet);
9104         let logger = test_utils::TestLogger::with_id(format!("node {}", "Bob"));
9105         let persister = test_utils::TestPersister::new();
9106         let watchtower_bob = {
9107                 let monitors = nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap();
9108                 let monitor = monitors.get(&outpoint).unwrap();
9109                 let mut w = test_utils::TestVecWriter(Vec::new());
9110                 monitor.write(&mut w).unwrap();
9111                 let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
9112                                 &mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
9113                 assert!(new_monitor == *monitor);
9114                 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
9115                 assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
9116                 watchtower
9117         };
9118         let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
9119         watchtower_bob.chain_monitor.block_connected(&Block { header, txdata: vec![] }, CHAN_CONFIRM_DEPTH + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
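        // Bob's view of the chain is one block behind Alice's, so he has not yet hit the HTLC-timeout
        // trigger height and will still accept the next commitment update.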
9120
9121         // Route another payment to generate another update while the previous HTLC is still pending
9122         let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[0]);
9123         {
9124                 let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
9125                 let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 3000000, TEST_FINAL_CLTV, &logger).unwrap();
9126                 nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
9127         }
9128         check_added_monitors!(nodes[1], 1);
9129
9130         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
9131         assert_eq!(updates.update_add_htlcs.len(), 1);
9132         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]);
9133         if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2) {
9134                 if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
9135                         // Watchtower Alice should already have seen the block and reject the update
9136                         if let Err(_) =  watchtower_alice.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
9137                         if let Ok(_) = watchtower_bob.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
9138                         if let Ok(_) = nodes[0].chain_monitor.update_channel(outpoint, update) {} else { assert!(false); }
9139                 } else { assert!(false); }
9140         } else { assert!(false); };
9141         // Our local monitor is in-sync and hasn't yet processed the timeout
9142         check_added_monitors!(nodes[0], 1);
9143
9144         // Provide one more block to watchtower Bob, expect broadcast of commitment and HTLC-Timeout
9145         let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
9146         watchtower_bob.chain_monitor.block_connected(&Block { header, txdata: vec![] }, CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
9147
9148         // Watchtower Bob should have broadcast a commitment/HTLC-timeout
9149         let bob_state_y;
9150         {
9151                 let mut txn = chanmon_cfgs[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
9152                 assert_eq!(txn.len(), 2);
9153                 bob_state_y = txn[0].clone();
9154                 txn.clear();
9155         };
9156
9157         // We confirm Bob's state Y on Alice; she should broadcast an HTLC-timeout
9158         let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
9159         watchtower_alice.chain_monitor.block_connected(&Block { header, txdata: vec![bob_state_y.clone()] }, CHAN_CONFIRM_DEPTH + 2 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
9160         {
9161                 let htlc_txn = chanmon_cfgs[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
9162                 // We broadcast the transaction twice: once due to the HTLC-timeout and once due to
9163                 // the onchain detection of the HTLC output
9164                 assert_eq!(htlc_txn.len(), 2);
9165                 check_spends!(htlc_txn[0], bob_state_y);
9166                 check_spends!(htlc_txn[1], bob_state_y);
9167         }
9168 }
9169
9170 #[test]
9171 fn test_pre_lockin_no_chan_closed_update() {
9172         // Test that if a peer closes a channel in response to a funding_created message we don't
9173         // generate a channel update (as the channel cannot appear on chain without a funding_signed
9174         // message).
9175         //
9176         // Doing so would imply a channel monitor update before the initial channel monitor
9177         // registration, violating our API guarantees.
9178         //
9179         // Previously, full_stack_target managed to hit this case by opening then closing a channel,
9180         // then opening a second channel with the same funding output as the first (which is not
9181         // rejected because the first channel does not exist in the ChannelManager) and closing it
9182         // before receiving funding_signed.
9183         let chanmon_cfgs = create_chanmon_cfgs(2);
9184         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9185         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9186         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9187
9188         // Create an initial channel
9189         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
9190         let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9191         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_chan_msg);
9192         let accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
9193         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_chan_msg);
9194
9195         // Move the first channel through the funding flow...
9196         let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], 100000, 42);
9197
9198         nodes[0].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
9199         check_added_monitors!(nodes[0], 0);
9200
9201         let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9202         let channel_id = ::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
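        // The peer errors out before funding_signed, so we must close the still-pre-funding channel
        // without generating any ChannelMonitor update (verified via added_monitors below).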
9203         nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
9204         assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
9205 }
9206
9207 #[test]
9208 fn test_htlc_no_detection() {
9209         // This test is a mutation to underscore the detection logic bug we had
9210         // before #653. The HTLC value routed is above the remaining balance, thus
9211         // inverting the HTLC and `to_remote` outputs. The HTLC will come second and
9212         // wouldn't have been seen by the pre-#653 detection as we were enumerate()'ing
9213         // over a watched outputs vector (Vec<TxOut>), thus implicitly relying on
9214         // output ordering for correct filtering of spending children.
9215
9216         let chanmon_cfgs = create_chanmon_cfgs(2);
9217         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9218         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9219         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9220
9221         // Create some initial channels
9222         let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
9223
9224         send_payment(&nodes[0], &vec!(&nodes[1])[..], 1_000_000);
9225         let (_, our_payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 2_000_000);
9226         let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
9227         assert_eq!(local_txn[0].input.len(), 1);
9228         assert_eq!(local_txn[0].output.len(), 3);
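        // to_local, to_remote and the offered HTLC (which, as noted above, is larger than the to_remote balance).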
9229         check_spends!(local_txn[0], chan_1.3);
9230
9231         // Timeout HTLC on A's chain and so it can generate a HTLC-Timeout tx
9232         let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
9233         connect_block(&nodes[0], &Block { header, txdata: vec![local_txn[0].clone()] });
9234         // We deliberately connect the local tx twice, as doing so would provoke a failure if this
9235         // test were run against the pre-#653 code.
9236         chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &Block { header, txdata: vec![local_txn[0].clone()] }, nodes[0].best_block_info().1 + 1);
9237         check_closed_broadcast!(nodes[0], true);
9238         check_added_monitors!(nodes[0], 1);
9239         connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1);
9240
9241         let htlc_timeout = {
9242                 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
9243                 assert_eq!(node_txn[1].input.len(), 1);
9244                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
9245                 check_spends!(node_txn[1], local_txn[0]);
9246                 node_txn[1].clone()
9247         };
9248
9249         let header_201 = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
9250         connect_block(&nodes[0], &Block { header: header_201, txdata: vec![htlc_timeout.clone()] });
9251         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
9252         expect_payment_failed!(nodes[0], our_payment_hash, true);
9253 }
9254
9255 fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain_before_fulfill: bool) {
9256         // If we route an HTLC, then learn the HTLC's preimage after the upstream channel has been
9257         // force-closed, we must claim that HTLC on-chain. (Given an HTLC forwarded from Alice --> Bob -->
9258         // Carol, Alice would be the upstream node, and Carol the downstream.)
9259         //
9260         // Steps of the test:
9261         // 1) Alice sends an HTLC to Carol through Bob.
9262         // 2) Carol doesn't settle the HTLC.
9263         // 3) If broadcast_alice is true, Alice force-closes her channel with Bob. Else Bob force-closes.
9264         // Steps 4 and 5 may be reordered depending on go_onchain_before_fulfill.
9265         // 4) Bob sees Alice's commitment on his chain (or vice versa). An offered output is present
9266         //    but can't be claimed yet, as Bob doesn't have knowledge of the preimage.
9267         // 5) Carol releases the preimage to Bob off-chain.
9268         // 6) Bob claims the offered output on the broadcasted commitment.
9269         let chanmon_cfgs = create_chanmon_cfgs(3);
9270         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
9271         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
9272         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
9273
9274         // Create some initial channels
9275         let chan_ab = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
9276         create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 10001, InitFeatures::known(), InitFeatures::known());
9277
9278         // Steps (1) and (2):
9279         // Send an HTLC Alice --> Bob --> Carol, but Carol doesn't settle the HTLC back.
9280         let (payment_preimage, _payment_hash, _payment_secret) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3_000_000);
9281
9282         // Check that Alice's commitment transaction now contains an output for this HTLC.
9283         let alice_txn = get_local_commitment_txn!(nodes[0], chan_ab.2);
9284         check_spends!(alice_txn[0], chan_ab.3);
9285         assert_eq!(alice_txn[0].output.len(), 2);
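        // Just to_local and the offered HTLC: Bob's balance is only the 10_001 msat push, which is
        // below the dust limit, so no to_remote output appears.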
9286         check_spends!(alice_txn[1], alice_txn[0]); // 2nd transaction is a non-final HTLC-timeout
9287         assert_eq!(alice_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
9288         assert_eq!(alice_txn.len(), 2);
9289
9290         // Steps (3) and (4):
9291         // If `go_onchain_before_fulfill`, broadcast the relevant commitment transaction and check that Bob
9292         // responds by (1) broadcasting a channel update and (2) adding a new ChannelMonitor.
9293         let mut force_closing_node = 0; // Alice force-closes
9294         if !broadcast_alice { force_closing_node = 1; } // Bob force-closes
9295         nodes[force_closing_node].node.force_close_channel(&chan_ab.2).unwrap();
9296         check_closed_broadcast!(nodes[force_closing_node], true);
9297         check_added_monitors!(nodes[force_closing_node], 1);
9298         if go_onchain_before_fulfill {
9299                 let txn_to_broadcast = match broadcast_alice {
9300                         true => alice_txn.clone(),
9301                         false => get_local_commitment_txn!(nodes[1], chan_ab.2)
9302                 };
9303                 let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
9304                 connect_block(&nodes[1], &Block { header, txdata: vec![txn_to_broadcast[0].clone()]});
9305                 let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
9306                 if broadcast_alice {
9307                         check_closed_broadcast!(nodes[1], true);
9308                         check_added_monitors!(nodes[1], 1);
9309                 }
9310                 assert_eq!(bob_txn.len(), 1);
9311                 check_spends!(bob_txn[0], chan_ab.3);
9312         }
9313
9314         // Step (5):
9315         // Carol then claims the funds and sends an update_fulfill message to Bob, and they go through the
9316         // process of removing the HTLC from their commitment transactions.
9317         assert!(nodes[2].node.claim_funds(payment_preimage));
9318         check_added_monitors!(nodes[2], 1);
9319         let carol_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
9320         assert!(carol_updates.update_add_htlcs.is_empty());
9321         assert!(carol_updates.update_fail_htlcs.is_empty());
9322         assert!(carol_updates.update_fail_malformed_htlcs.is_empty());
9323         assert!(carol_updates.update_fee.is_none());
9324         assert_eq!(carol_updates.update_fulfill_htlcs.len(), 1);
9325
9326         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &carol_updates.update_fulfill_htlcs[0]);
9327         expect_payment_forwarded!(nodes[1], if go_onchain_before_fulfill || force_closing_node == 1 { None } else { Some(1000) }, false);
9328         // If Alice broadcasted but Bob doesn't know yet, here he prepares to tell her about the preimage.
9329         if !go_onchain_before_fulfill && broadcast_alice {
9330                 let events = nodes[1].node.get_and_clear_pending_msg_events();
9331                 assert_eq!(events.len(), 1);
9332                 match events[0] {
9333                         MessageSendEvent::UpdateHTLCs { ref node_id, .. } => {
9334                                 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
9335                         },
9336                         _ => panic!("Unexpected event"),
9337                 };
9338         }
9339         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &carol_updates.commitment_signed);
9340         // One monitor update for the preimage to update the Bob<->Alice channel, one monitor update
9341         // for Carol<->Bob's updated commitment transaction info.
9342         check_added_monitors!(nodes[1], 2);
9343
9344         let events = nodes[1].node.get_and_clear_pending_msg_events();
9345         assert_eq!(events.len(), 2);
9346         let bob_revocation = match events[0] {
9347                 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
9348                         assert_eq!(*node_id, nodes[2].node.get_our_node_id());
9349                         (*msg).clone()
9350                 },
9351                 _ => panic!("Unexpected event"),
9352         };
9353         let bob_updates = match events[1] {
9354                 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
9355                         assert_eq!(*node_id, nodes[2].node.get_our_node_id());
9356                         (*updates).clone()
9357                 },
9358                 _ => panic!("Unexpected event"),
9359         };
9360
9361         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bob_revocation);
9362         check_added_monitors!(nodes[2], 1);
9363         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bob_updates.commitment_signed);
9364         check_added_monitors!(nodes[2], 1);
9365
9366         let events = nodes[2].node.get_and_clear_pending_msg_events();
9367         assert_eq!(events.len(), 1);
9368         let carol_revocation = match events[0] {
9369                 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
9370                         assert_eq!(*node_id, nodes[1].node.get_our_node_id());
9371                         (*msg).clone()
9372                 },
9373                 _ => panic!("Unexpected event"),
9374         };
9375         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &carol_revocation);
9376         check_added_monitors!(nodes[1], 1);
9377
9378         // If this test requires the force-closed channel to not be on-chain until after the fulfill,
9379         // here's where we put said channel's commitment tx on-chain.
9380         let mut txn_to_broadcast = alice_txn.clone();
9381         if !broadcast_alice { txn_to_broadcast = get_local_commitment_txn!(nodes[1], chan_ab.2); }
9382         if !go_onchain_before_fulfill {
9383                 let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
9384                 connect_block(&nodes[1], &Block { header, txdata: vec![txn_to_broadcast[0].clone()]});
9385                 // If Bob was the one to force-close, he will have already passed these checks earlier.
9386                 if broadcast_alice {
9387                         check_closed_broadcast!(nodes[1], true);
9388                         check_added_monitors!(nodes[1], 1);
9389                 }
9390                 let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
9391                 if broadcast_alice {
9392                         // In `connect_block()`, the ChainMonitor and ChannelManager are separately notified about a
9393                         // new block being connected. The ChannelManager being notified triggers a monitor update,
9394                         // which triggers broadcasting our commitment tx and an HTLC-claiming tx. The ChainMonitor
9395                         // being notified triggers the HTLC-claiming tx redundantly, resulting in 3 total txs being
9396                         // broadcasted.
9397                         assert_eq!(bob_txn.len(), 3);
9398                         check_spends!(bob_txn[1], chan_ab.3);
9399                 } else {
9400                         assert_eq!(bob_txn.len(), 2);
9401                         check_spends!(bob_txn[0], chan_ab.3);
9402                 }
9403         }
9404
9405         // Step (6):
9406         // Finally, check that Bob broadcasted a preimage-claiming transaction for the HTLC output on the
9407         // broadcasted commitment transaction.
9408         {
9409                 let bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
9410                 if go_onchain_before_fulfill {
9411                         // Bob should now have an extra broadcasted tx, for the preimage-claiming transaction.
9412                         assert_eq!(bob_txn.len(), 2);
9413                 }
9414                 let script_weight = match broadcast_alice {
9415                         true => OFFERED_HTLC_SCRIPT_WEIGHT,
9416                         false => ACCEPTED_HTLC_SCRIPT_WEIGHT
9417                 };
9418                 // If Alice force-closed and Bob didn't receive her commitment transaction until after he
9419                 // received Carol's fulfill, he broadcasts the HTLC-output-claiming transaction first. Else if
9420                 // Bob force closed or if he found out about Alice's commitment tx before receiving Carol's
9421                 // fulfill, then he broadcasts the HTLC-output-claiming transaction second.
9422                 if broadcast_alice && !go_onchain_before_fulfill {
9423                         check_spends!(bob_txn[0], txn_to_broadcast[0]);
9424                         assert_eq!(bob_txn[0].input[0].witness.last().unwrap().len(), script_weight);
9425                 } else {
9426                         check_spends!(bob_txn[1], txn_to_broadcast[0]);
9427                         assert_eq!(bob_txn[1].input[0].witness.last().unwrap().len(), script_weight);
9428                 }
9429         }
9430 }
9431
9432 #[test]
9433 fn test_onchain_htlc_settlement_after_close() {
9434         do_test_onchain_htlc_settlement_after_close(true, true);
9435         do_test_onchain_htlc_settlement_after_close(false, true); // Technically redundant, but may as well
9436         do_test_onchain_htlc_settlement_after_close(true, false);
9437         do_test_onchain_htlc_settlement_after_close(false, false);
9438 }
9439
9440 #[test]
9441 fn test_duplicate_chan_id() {
9442         // Test that if a given peer tries to open a channel with the same channel_id as one that is
9443         // already open we reject it and keep the old channel.
9444         //
9445         // Previously, full_stack_target managed to figure out that if you tried to open two channels
9446         // with the same funding output (ie post-funding channel_id), we'd create a monitor update for
9447         // the existing channel when we detect the duplicate new channel, screwing up our monitor
9448         // updating logic for the existing channel.
9449         let chanmon_cfgs = create_chanmon_cfgs(2);
9450         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9451         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9452         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9453
9454         // Create an initial channel
9455         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
9456         let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9457         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_chan_msg);
9458         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
9459
9460         // Try to create a second channel with the same temporary_channel_id as the first and check
9461         // that it is rejected.
9462         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_chan_msg);
9463         {
9464                 let events = nodes[1].node.get_and_clear_pending_msg_events();
9465                 assert_eq!(events.len(), 1);
9466                 match events[0] {
9467                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
9468                                 // Technically, at this point, nodes[1] would be justified in thinking both the
9469                                 // first (valid) and second (invalid) channels are closed, given they both have
9470                                 // the same non-temporary channel_id. However, currently we do not, so we just
9471                                 // move forward with it.
9472                                 assert_eq!(msg.channel_id, open_chan_msg.temporary_channel_id);
9473                                 assert_eq!(node_id, nodes[0].node.get_our_node_id());
9474                         },
9475                         _ => panic!("Unexpected event"),
9476                 }
9477         }
9478
9479         // Move the first channel through the funding flow...
9480         let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], 100000, 42);
9481
9482         nodes[0].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
9483         check_added_monitors!(nodes[0], 0);
9484
9485         let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9486         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
9487         {
9488                 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
9489                 assert_eq!(added_monitors.len(), 1);
9490                 assert_eq!(added_monitors[0].0, funding_output);
9491                 added_monitors.clear();
9492         }
9493         let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
9494
9495         let funding_outpoint = ::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index };
9496         let channel_id = funding_outpoint.to_channel_id();
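        // (Per BOLT 2, the funding-based channel_id is the funding txid with its final two bytes
        // XORed with the funding output index, which is what `OutPoint::to_channel_id()` computes.)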
9497
9498         // Now we have the first channel past funding_created (ie it has a txid-based channel_id, not a
9499         // temporary one).
9500
9501         // First try to open a second channel with a temporary channel id equal to the txid-based one.
9502         // Technically this is allowed by the spec, but we don't support it and there's little reason
9503         // to. Still, it shouldn't cause any other issues.
9504         open_chan_msg.temporary_channel_id = channel_id;
9505         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_chan_msg);
9506         {
9507                 let events = nodes[1].node.get_and_clear_pending_msg_events();
9508                 assert_eq!(events.len(), 1);
9509                 match events[0] {
9510                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
9511                                 // Technically, at this point, nodes[1] would be justified in thinking both
9512                                 // channels are closed, but currently we do not, so we just move forward with it.
9513                                 assert_eq!(msg.channel_id, open_chan_msg.temporary_channel_id);
9514                                 assert_eq!(node_id, nodes[0].node.get_our_node_id());
9515                         },
9516                         _ => panic!("Unexpected event"),
9517                 }
9518         }
9519
9520         // Now try to create a second channel which has a duplicate funding output.
9521         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
9522         let open_chan_2_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9523         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_chan_2_msg);
9524         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
9525         create_funding_transaction(&nodes[0], 100000, 42); // Get and check the FundingGenerationReady event
9526
9527         let funding_created = {
9528                 let mut a_channel_lock = nodes[0].node.channel_state.lock().unwrap();
9529                 let mut as_chan = a_channel_lock.by_id.get_mut(&open_chan_2_msg.temporary_channel_id).unwrap();
9530                 let logger = test_utils::TestLogger::new();
9531                 as_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap()
9532         };
9533         check_added_monitors!(nodes[0], 0);
9534         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
9535         // At this point we'll try to add a duplicate channel monitor, which will be rejected, but
9536         // still needs to be cleared here.
9537         check_added_monitors!(nodes[1], 1);
9538
9539         // ...still, nodes[1] will reject the duplicate channel.
9540         {
9541                 let events = nodes[1].node.get_and_clear_pending_msg_events();
9542                 assert_eq!(events.len(), 1);
9543                 match events[0] {
9544                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
9545                                 // Technically, at this point, nodes[1] would be justified in thinking both
9546                                 // channels are closed, but currently we do not, so we just move forward with it.
9547                                 assert_eq!(msg.channel_id, channel_id);
9548                                 assert_eq!(node_id, nodes[0].node.get_our_node_id());
9549                         },
9550                         _ => panic!("Unexpected event"),
9551                 }
9552         }
9553
9554         // Finally, finish creating the original channel and send a payment over it to make sure
9555         // everything is functional.
9556         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
9557         {
9558                 let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
9559                 assert_eq!(added_monitors.len(), 1);
9560                 assert_eq!(added_monitors[0].0, funding_output);
9561                 added_monitors.clear();
9562         }
9563
9564         let events_4 = nodes[0].node.get_and_clear_pending_events();
9565         assert_eq!(events_4.len(), 0);
9566         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
9567         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].txid(), funding_output.txid);
9568
9569         let (funding_locked, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
9570         let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked);
9571         update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
9572         send_payment(&nodes[0], &[&nodes[1]], 8000000);
9573 }
9574
9575 #[test]
9576 fn test_error_chans_closed() {
9577         // Test that we properly handle error messages, closing appropriate channels.
9578         //
9579         // Prior to #787 we'd allow a peer to make us force-close a channel we had with a different
9580         // peer. The "real" fix for that is to index channels by peer_id; in the meantime, however,
9581         // we can test various edge cases around it to ensure we don't regress.
9582         let chanmon_cfgs = create_chanmon_cfgs(3);
9583         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
9584         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
9585         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
9586
9587         // Create some initial channels
9588         let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
9589         let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
9590         let chan_3 = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100000, 10001, InitFeatures::known(), InitFeatures::known());
9591
9592         assert_eq!(nodes[0].node.list_usable_channels().len(), 3);
9593         assert_eq!(nodes[1].node.list_usable_channels().len(), 2);
9594         assert_eq!(nodes[2].node.list_usable_channels().len(), 1);
9595
9596         // An error referencing a channel we only have with a different peer should not close anything
9597         nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_3.2, data: "ERR".to_owned() });
9598         assert_eq!(nodes[0].node.list_usable_channels().len(), 3);
9599
9600         // Closing one channel doesn't impact others
9601         nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
9602         check_added_monitors!(nodes[0], 1);
9603         check_closed_broadcast!(nodes[0], false);
9604         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
9605         assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
9606         assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
9607         assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_3.2);
9608
9609         // An all-zeros channel_id in an error message refers to all channels with that peer, so it should close every channel we have with nodes[1]
9610         let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
9611         nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: [0; 32], data: "ERR".to_owned() });
9612         check_added_monitors!(nodes[0], 2);
9613         let events = nodes[0].node.get_and_clear_pending_msg_events();
9614         assert_eq!(events.len(), 2);
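        // Bit 1 (value 2) of the channel_update flags is the BOLT 7 `disable` bit, so both updates
        // below should mark the now-closed channels as disabled.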
9615         match events[0] {
9616                 MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
9617                         assert_eq!(msg.contents.flags & 2, 2);
9618                 },
9619                 _ => panic!("Unexpected event"),
9620         }
9621         match events[1] {
9622                 MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
9623                         assert_eq!(msg.contents.flags & 2, 2);
9624                 },
9625                 _ => panic!("Unexpected event"),
9626         }
9627         // Note that at this point users of a standard PeerHandler will end up calling
9628         // peer_disconnected with no_connection_possible set to false, duplicating the
9629         // close-all-channels logic. That's OK - we'd rather duplicate that work than risk never
9630         // force-closing channels for users with their own peer handling logic. We duplicate the call here, mirroring that behavior.
9631         assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
9632         assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
9633
9634         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true);
9635         assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
9636         assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
9637 }
9638
9639 #[test]
9640 fn test_invalid_funding_tx() {
9641         // Test that we properly handle invalid funding transactions sent to us from a peer.
9642         //
9643         // Previously, all other major lightning implementations had failed to properly sanitize
9644         // funding transactions from their counterparties, leading to a multi-implementation critical
9645         // security vulnerability (though we always sanitized properly, we've previously had
9646         // unreleased crashes in the sanitization process).
9647         let chanmon_cfgs = create_chanmon_cfgs(2);
9648         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9649         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9650         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9651
9652         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 10_000, 42, None).unwrap();
9653         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
9654         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
9655
9656         let (temporary_channel_id, mut tx, _) = create_funding_transaction(&nodes[0], 100_000, 42);
9657         for output in tx.output.iter_mut() {
9658                 // Make the confirmed funding transaction have a bogus script_pubkey
9659                 output.script_pubkey = bitcoin::Script::new();
9660         }
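        // The funding output is expected to be a P2WSH paying to the 2-of-2 funding script for the
        // agreed value; with an empty script_pubkey the confirmed transaction no longer matches, so
        // nodes[1] must reject the channel when the transaction confirms rather than treating it as open.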
9661
9662         nodes[0].node.funding_transaction_generated_unchecked(&temporary_channel_id, tx.clone(), 0).unwrap();
9663         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
9664         check_added_monitors!(nodes[1], 1);
9665
9666         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
9667         check_added_monitors!(nodes[0], 1);
9668
9669         let events_1 = nodes[0].node.get_and_clear_pending_events();
9670         assert_eq!(events_1.len(), 0);
9671
9672         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
9673         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx);
9674         nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
9675
9676         confirm_transaction_at(&nodes[1], &tx, 1);
9677         check_added_monitors!(nodes[1], 1);
9678         let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
9679         assert_eq!(events_2.len(), 1);
9680         if let MessageSendEvent::HandleError { node_id, action } = &events_2[0] {
9681                 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
9682                 if let msgs::ErrorAction::SendErrorMessage { msg } = action {
9683                         assert_eq!(msg.data, "funding tx had wrong script/value or output index");
9684                 } else { panic!(); }
9685         } else { panic!(); }
9686         assert_eq!(nodes[1].node.list_channels().len(), 0);
9687 }
9688
9689 fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_timelock: bool) {
9690         // In the first version of the chain::Confirm interface, after a refactor was made to not
9691         // broadcast CSV-locked transactions until their CSV lock is up, we wouldn't reliably broadcast
9692         // transactions after a `transactions_confirmed` call. Specifically, if the chain, provided via
9693         // `best_block_updated`, is at height N, and a transaction output which we wish to spend at
9694         // height N-1 (due to a CSV to height N-1) is provided at height N, we would not broadcast the
9695         // spending transaction until height N+1 (or greater). This was due to the way
9696         // `ChannelMonitor::transactions_confirmed` worked, only checking if we should broadcast a
9697         // spending transaction at the height the input transaction was confirmed at, not whether we
9698         // should broadcast a spending transaction at the current height.
9699         // A second, similar, issue involved failing HTLCs backwards - because we only provided the
9700         // height at which transactions were confirmed to `OnchainTx::update_claims_view`, it wasn't
9701         // aware that the anti-reorg-delay had, in fact, already expired, so we would wait to fail
9702         // backwards until we learned about an additional block.
9703         //
9704         // As an additional check, if `test_height_before_timelock` is set, we instead test that we
9705         // aren't broadcasting transactions too early (ie not broadcasting them at all).
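        // Concretely, below we force-close the nodes[1] <-> nodes[2] channel, connect 24 * 6 blocks
        // to nodes[1] (unless `test_height_before_timelock` is set), and only then tell its
        // ChannelMonitor about the confirmed commitment transaction at its original, now-buried
        // height, checking that the HTLC-timeout broadcast and the backwards HTLC failure happen
        // without waiting for yet another block.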
9706         let chanmon_cfgs = create_chanmon_cfgs(3);
9707         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
9708         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
9709         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
9710         *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;
9711
9712         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
9713         let (chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
9714         let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
9715         nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
9716         nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
9717
9718         nodes[1].node.force_close_channel(&channel_id).unwrap();
9719         check_closed_broadcast!(nodes[1], true);
9720         check_added_monitors!(nodes[1], 1);
9721         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
9722         assert_eq!(node_txn.len(), 1);
9723
9724         let conf_height = nodes[1].best_block_info().1;
9725         if !test_height_before_timelock {
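                // 24 * 6 = 144 blocks is enough that both the HTLC's CLTV expiry and the default
                // to_self_delay used in these tests (BREAKDOWN_TIMEOUT, also 6 * 24) will have passed
                // by the time the commitment transaction is confirmed below.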
9726                 connect_blocks(&nodes[1], 24 * 6);
9727         }
9728         nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
9729                 &nodes[1].get_block_header(conf_height), &[(0, &node_txn[0])], conf_height);
9730         if test_height_before_timelock {
9731                 // If we confirmed the close transaction, but timelocks have not yet expired, we should not
9732                 // generate any events or broadcast any transactions
9733                 assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
9734                 assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
9735         } else {
9736                 // We should re-broadcast our commitment transaction (spending the funding output) along
9737                 // with an HTLC-timeout transaction spending it
9737                 let spending_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
9738                 assert_eq!(spending_txn.len(), 2);
9739                 assert_eq!(spending_txn[0], node_txn[0]);
9740                 check_spends!(spending_txn[1], node_txn[0]);
9741                 // We should also generate a SpendableOutputs event with the to_self output (as its
9742                 // timelock is up).
9743                 let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
9744                 assert_eq!(descriptor_spend_txn.len(), 1);
9745
9746                 // If we also discover that the HTLC-Timeout transaction was confirmed some time ago, we
9747                 // should immediately fail-backwards the HTLC to the previous hop, without waiting for an
9748                 // additional block built on top of the current chain.
9749                 nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
9750                         &nodes[1].get_block_header(conf_height + 1), &[(0, &spending_txn[1])], conf_height + 1);
9751                 expect_pending_htlcs_forwardable!(nodes[1]);
9752                 check_added_monitors!(nodes[1], 1);
9753
9754                 let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
9755                 assert!(updates.update_add_htlcs.is_empty());
9756                 assert!(updates.update_fulfill_htlcs.is_empty());
9757                 assert_eq!(updates.update_fail_htlcs.len(), 1);
9758                 assert!(updates.update_fail_malformed_htlcs.is_empty());
9759                 assert!(updates.update_fee.is_none());
9760                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
9761                 commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
9762                 expect_payment_failed!(nodes[0], payment_hash, false);
9763                 expect_payment_failure_chan_update!(nodes[0], chan_announce.contents.short_channel_id, true);
9764         }
9765 }
9766
9767 #[test]
9768 fn test_tx_confirmed_skipping_blocks_immediate_broadcast() {
9769         do_test_tx_confirmed_skipping_blocks_immediate_broadcast(false);
9770         do_test_tx_confirmed_skipping_blocks_immediate_broadcast(true);
9771 }
9772
9773 #[test]
9774 fn test_keysend_payments_to_public_node() {
9775         let chanmon_cfgs = create_chanmon_cfgs(2);
9776         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9777         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9778         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9779
9780         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
9781         let network_graph = nodes[0].net_graph_msg_handler.network_graph.read().unwrap();
9782         let payer_pubkey = nodes[0].node.get_our_node_id();
9783         let payee_pubkey = nodes[1].node.get_our_node_id();
9784         let route = get_route(&payer_pubkey, &network_graph, &payee_pubkey, None,
9785                         None, &vec![], 10000, 40,
9786                         nodes[0].logger).unwrap();
9787
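        // Keysend (spontaneous) payments need no invoice: the sender picks the payment preimage
        // itself and ships it to the recipient inside the onion, so the recipient can claim the
        // payment without having issued a payment_secret.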
9788         let test_preimage = PaymentPreimage([42; 32]);
9789         let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage)).unwrap();
9790         check_added_monitors!(nodes[0], 1);
9791         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9792         assert_eq!(events.len(), 1);
9793         let event = events.pop().unwrap();
9794         let path = vec![&nodes[1]];
9795         pass_along_path(&nodes[0], &path, 10000, payment_hash, None, event, true, Some(test_preimage));
9796         claim_payment(&nodes[0], &path, test_preimage);
9797 }
9798
9799 #[test]
9800 fn test_keysend_payments_to_private_node() {
9801         let chanmon_cfgs = create_chanmon_cfgs(2);
9802         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9803         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9804         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9805
9806         let payer_pubkey = nodes[0].node.get_our_node_id();
9807         let payee_pubkey = nodes[1].node.get_our_node_id();
9808         nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() });
9809         nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
9810
9811         let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
9812         let network_graph = nodes[0].net_graph_msg_handler.network_graph.read().unwrap();
9813         let first_hops = nodes[0].node.list_usable_channels();
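        // The channel above was never announced, so it isn't in the public network graph; passing
        // our usable channels as first hops lets the router reach the otherwise-private nodes[1].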
9814         let route = get_keysend_route(&payer_pubkey, &network_graph, &payee_pubkey,
9815                                 Some(&first_hops.iter().collect::<Vec<_>>()), &vec![], 10000, 40,
9816                                 nodes[0].logger).unwrap();
9817
9818         let test_preimage = PaymentPreimage([42; 32]);
9819         let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage)).unwrap();
9820         check_added_monitors!(nodes[0], 1);
9821         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9822         assert_eq!(events.len(), 1);
9823         let event = events.pop().unwrap();
9824         let path = vec![&nodes[1]];
9825         pass_along_path(&nodes[0], &path, 10000, payment_hash, None, event, true, Some(test_preimage));
9826         claim_payment(&nodes[0], &path, test_preimage);
9827 }
9828
9829 fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, at_forward: bool, on_holder_tx: bool) {
9830         // Test that we properly reject dust HTLCs which would violate our `max_dust_htlc_exposure_msat` policy.
9831         //
9832         // At HTLC forward (`send_payment()`), if the sum of the trimmed-to-dust inbound HTLC balance,
9833         // the trimmed-to-dust outbound HTLC balance, and this new payment, as included on the next
9834         // holder or counterparty commitment, is above our `max_dust_htlc_exposure_msat`, we reject the send.
9835         // At HTLC reception (`update_add_htlc()`), if the sum of the trimmed-to-dust inbound HTLC balance,
9836         // the trimmed-to-dust outbound HTLC balance, and this newly received HTLC, as included on the next
9837         // holder or counterparty commitment, is above our `max_dust_htlc_exposure_msat`, we fail the HTLC.
9838         // Note that we return a `temporary_channel_failure` (0x1000 | 7), as the channel may become
9839         // available again for HTLC processing once the dust bandwidth has cleared up.
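        //
        // The concrete numbers asserted below follow from the setup: on the holder commitment we place
        // two 2_300_000 msat dust HTLCs (4_600_000 msat of exposure) and the third pushes us to
        // 6_900_000 msat; on the counterparty commitment we place twenty-five 200_000 msat dust HTLCs
        // (5_000_000 msat) and the twenty-sixth pushes us to 5_200_000 msat - both over the 5_000_000
        // msat `max_dust_htlc_exposure_msat` this test configures.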
9840
9841         let chanmon_cfgs = create_chanmon_cfgs(2);
9842         let mut config = test_default_channel_config();
9843         config.channel_options.max_dust_htlc_exposure_msat = 5_000_000; // default setting value
9844         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9845         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(config)]);
9846         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9847
9848         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
9849         let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9850         open_channel.max_htlc_value_in_flight_msat = 50_000_000;
9851         open_channel.max_accepted_htlcs = 60;
9852         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel);
9853         let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
9854         if on_holder_tx {
9855                 accept_channel.dust_limit_satoshis = 660;
9856         }
9857         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
9858
9859         let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], 1_000_000, 42);
9860
9861         if on_holder_tx {
9862                 if let Some(mut chan) = nodes[1].node.channel_state.lock().unwrap().by_id.get_mut(&temporary_channel_id) {
9863                         chan.holder_dust_limit_satoshis = 660;
9864                 }
9865         }
9866
9867         nodes[0].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
9868         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
9869         check_added_monitors!(nodes[1], 1);
9870
9871         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
9872         check_added_monitors!(nodes[0], 1);
9873
9874         let (funding_locked, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
9875         let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked);
9876         update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
9877
9878         if on_holder_tx {
9879                 if dust_outbound_balance {
9880                         for i in 0..2 {
9881                                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 2_300_000);
9882                                 if let Err(_) = nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)) { panic!("Unexpected event at dust HTLC {}", i); }
9883                         }
9884                 } else {
9885                         for _ in 0..2 {
9886                                 route_payment(&nodes[0], &[&nodes[1]], 2_300_000);
9887                         }
9888                 }
9889         } else {
9890                 if dust_outbound_balance {
9891                         for i in 0..25 {
9892                                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 200_000); // + 177_000 msat of HTLC-success tx at 253 sats/kWU
9893                                 if let Err(_) = nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)) { panic!("Unexpected event at dust HTLC {}", i); }
9894                         }
9895                 } else {
9896                         for _ in 0..25 {
9897                                 route_payment(&nodes[0], &[&nodes[1]], 200_000); // + 167_000 msat of HTLC-timeout tx at 253 sats/kWU
9898                         }
9899                 }
9900         }
9901
9902         if at_forward {
9903                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { 2_300_000 } else { 200_000 });
9904                 let mut config = UserConfig::default();
9905                 if on_holder_tx {
9906                         unwrap_send_err!(nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", 6_900_000, config.channel_options.max_dust_htlc_exposure_msat)));
9907                 } else {
9908                         unwrap_send_err!(nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", 5_200_000, config.channel_options.max_dust_htlc_exposure_msat)));
9909                 }
9910         } else {
9911                 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], if on_holder_tx { 2_300_000 } else { 200_000 });
9912                 nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
9913                 check_added_monitors!(nodes[0], 1);
9914                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
9915                 assert_eq!(events.len(), 1);
9916                 let payment_event = SendEvent::from_event(events.remove(0));
9917                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
9918                 if on_holder_tx {
9919                         nodes[1].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", 6_900_000, config.channel_options.max_dust_htlc_exposure_msat), 1);
9920                 } else {
9921                         nodes[1].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", 5_200_000, config.channel_options.max_dust_htlc_exposure_msat), 1);
9922                 }
9923         }
9924
9925         let _ = nodes[1].node.get_and_clear_pending_msg_events();
9926         let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
9927         added_monitors.clear();
9928 }
9929
9930 #[test]
9931 fn test_max_dust_htlc_exposure() {
9932         do_test_max_dust_htlc_exposure(true, true, true);
9933         do_test_max_dust_htlc_exposure(false, true, true);
9934         do_test_max_dust_htlc_exposure(false, false, true);
9935         do_test_max_dust_htlc_exposure(false, false, false);
9936         do_test_max_dust_htlc_exposure(true, true, false);
9937         do_test_max_dust_htlc_exposure(true, false, false);
9938         do_test_max_dust_htlc_exposure(true, false, true);
9939         do_test_max_dust_htlc_exposure(false, true, false);
9940 }