Actual no_std support
[rust-lightning] / lightning / src / ln / functional_tests.rs
1 // This file is Copyright its original authors, visible in version control
2 // history.
3 //
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
8 // licenses.
9
10 //! Tests that stand up a network of ChannelManagers, create channels, send payments/messages
11 //! between them, and often check that the resulting ChannelMonitors are able to claim outputs
12 //! on-chain.
13
14 use chain;
15 use chain::{Confirm, Listen, Watch};
16 use chain::channelmonitor;
17 use chain::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
18 use chain::transaction::OutPoint;
19 use chain::keysinterface::{KeysInterface, BaseSign};
20 use ln::{PaymentPreimage, PaymentSecret, PaymentHash};
21 use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC};
22 use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA};
23 use ln::channel::{Channel, ChannelError};
24 use ln::{chan_utils, onion_utils};
25 use ln::chan_utils::HTLC_SUCCESS_TX_WEIGHT;
26 use routing::router::{Route, RouteHop, RouteHint, RouteHintHop, get_route, get_keysend_route};
27 use routing::network_graph::RoutingFees;
28 use ln::features::{ChannelFeatures, InitFeatures, InvoiceFeatures, NodeFeatures};
29 use ln::msgs;
30 use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, HTLCFailChannelUpdate, ErrorAction};
31 use util::enforcing_trait_impls::EnforcingSigner;
32 use util::{byte_utils, test_utils};
33 use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
34 use util::errors::APIError;
35 use util::ser::{Writeable, ReadableArgs};
36 use util::config::UserConfig;
37
38 use bitcoin::hashes::sha256d::Hash as Sha256dHash;
39 use bitcoin::hash_types::{Txid, BlockHash};
40 use bitcoin::blockdata::block::{Block, BlockHeader};
41 use bitcoin::blockdata::script::Builder;
42 use bitcoin::blockdata::opcodes;
43 use bitcoin::blockdata::constants::genesis_block;
44 use bitcoin::network::constants::Network;
45
46 use bitcoin::hashes::sha256::Hash as Sha256;
47 use bitcoin::hashes::Hash;
48
49 use bitcoin::secp256k1::{Secp256k1, Message};
50 use bitcoin::secp256k1::key::{PublicKey, SecretKey};
51
52 use regex;
53
54 use io;
55 use prelude::*;
56 use alloc::collections::BTreeSet;
57 use core::default::Default;
58 use sync::{Arc, Mutex};
59
60 use ln::functional_test_utils::*;
61 use ln::chan_utils::CommitmentTransaction;
62 use ln::msgs::OptionalField::Present;
63
64 #[test]
65 fn test_insane_channel_opens() {
66         // Stand up a network of 2 nodes
67         let chanmon_cfgs = create_chanmon_cfgs(2);
68         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
69         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
70         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
71
72         // Instantiate channel parameters where we push the maximum msats given our
73         // funding satoshis
74         let channel_value_sat = 31337; // same as funding satoshis
75         let channel_reserve_satoshis = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(channel_value_sat);
76         let push_msat = (channel_value_sat - channel_reserve_satoshis) * 1000;
77
78         // Have node0 initiate a channel to node1 with aforementioned parameters
79         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_sat, push_msat, 42, None).unwrap();
80
81         // Extract the channel open message from node0 to node1
82         let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
83
84         // Test helper that asserts we get the correct error string given a mutator
85         // that supposedly makes the channel open message insane
86         let insane_open_helper = |expected_error_str: &str, message_mutator: fn(msgs::OpenChannel) -> msgs::OpenChannel| {
87                 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &message_mutator(open_channel_message.clone()));
88                 let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
89                 assert_eq!(msg_events.len(), 1);
90                 let expected_regex = regex::Regex::new(expected_error_str).unwrap();
91                 if let MessageSendEvent::HandleError { ref action, .. } = msg_events[0] {
92                         match action {
93                                 &ErrorAction::SendErrorMessage { .. } => {
94                                         nodes[1].logger.assert_log_regex("lightning::ln::channelmanager".to_string(), expected_regex, 1);
95                                 },
96                                 _ => panic!("unexpected event!"),
97                         }
98                 } else { assert!(false); }
99         };
100
101         use ln::channel::MAX_FUNDING_SATOSHIS;
102         use ln::channelmanager::MAX_LOCAL_BREAKDOWN_TIMEOUT;
103
104         // Test all mutations that would make the channel open message insane
105         insane_open_helper(format!("Funding must be smaller than {}. It was {}", MAX_FUNDING_SATOSHIS, MAX_FUNDING_SATOSHIS).as_str(), |mut msg| { msg.funding_satoshis = MAX_FUNDING_SATOSHIS; msg });
106
107         insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.funding_satoshis + 1; msg });
108
109         insane_open_helper(r"push_msat \d+ was larger than funding value \d+", |mut msg| { msg.push_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg });
110
111         insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.dust_limit_satoshis = msg.funding_satoshis + 1 ; msg });
112
113         insane_open_helper(r"Bogus; channel reserve \(\d+\) is less than dust limit \(\d+\)", |mut msg| { msg.dust_limit_satoshis = msg.channel_reserve_satoshis + 1; msg });
114
115         insane_open_helper(r"Minimum htlc value \(\d+\) was larger than full channel value \(\d+\)", |mut msg| { msg.htlc_minimum_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg });
116
117         insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg });
118
119         insane_open_helper("0 max_accepted_htlcs makes for a useless channel", |mut msg| { msg.max_accepted_htlcs = 0; msg });
120
121         insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.max_accepted_htlcs = 484; msg });
122 }
123
124 #[test]
125 fn test_async_inbound_update_fee() {
126         let chanmon_cfgs = create_chanmon_cfgs(2);
127         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
128         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
129         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
130         let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
131         let logger = test_utils::TestLogger::new();
132         let channel_id = chan.2;
133
134         // balancing
135         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
136
137         // A                                        B
138         // update_fee                            ->
139         // send (1) commitment_signed            -.
140         //                                       <- update_add_htlc/commitment_signed
141         // send (2) RAA (awaiting remote revoke) -.
142         // (1) commitment_signed is delivered    ->
143         //                                       .- send (3) RAA (awaiting remote revoke)
144         // (2) RAA is delivered                  ->
145         //                                       .- send (4) commitment_signed
146         //                                       <- (3) RAA is delivered
147         // send (5) commitment_signed            -.
148         //                                       <- (4) commitment_signed is delivered
149         // send (6) RAA                          -.
150         // (5) commitment_signed is delivered    ->
151         //                                       <- RAA
152         // (6) RAA is delivered                  ->
153
154         // First nodes[0] generates an update_fee
155         nodes[0].node.update_fee(channel_id, get_feerate!(nodes[0], channel_id) + 20).unwrap();
156         check_added_monitors!(nodes[0], 1);
157
158         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
159         assert_eq!(events_0.len(), 1);
160         let (update_msg, commitment_signed) = match events_0[0] { // (1)
161                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
162                         (update_fee.as_ref(), commitment_signed)
163                 },
164                 _ => panic!("Unexpected event"),
165         };
166
167         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
168
169         // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
170         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[0]);
171         let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
172         nodes[1].node.send_payment(&get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 40000, TEST_FINAL_CLTV, &logger).unwrap(), our_payment_hash, &Some(our_payment_secret)).unwrap();
173         check_added_monitors!(nodes[1], 1);
174
175         let payment_event = {
176                 let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events();
177                 assert_eq!(events_1.len(), 1);
178                 SendEvent::from_event(events_1.remove(0))
179         };
180         assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
181         assert_eq!(payment_event.msgs.len(), 1);
182
183         // ...now when the messages get delivered everyone should be happy
184         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
185         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2)
186         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
187         // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes
188         check_added_monitors!(nodes[0], 1);
189
190         // Deliver (1), generating (3):
191         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
192         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
193         // nodes[1] is awaiting nodes[0] revoke_and_ack so get_event_msg's assert(len == 1) passes
194         check_added_monitors!(nodes[1], 1);
195
196         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack); // deliver (2)
197         let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
198         assert!(bs_update.update_add_htlcs.is_empty()); // (4)
199         assert!(bs_update.update_fulfill_htlcs.is_empty()); // (4)
200         assert!(bs_update.update_fail_htlcs.is_empty()); // (4)
201         assert!(bs_update.update_fail_malformed_htlcs.is_empty()); // (4)
202         assert!(bs_update.update_fee.is_none()); // (4)
203         check_added_monitors!(nodes[1], 1);
204
205         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); // deliver (3)
206         let as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
207         assert!(as_update.update_add_htlcs.is_empty()); // (5)
208         assert!(as_update.update_fulfill_htlcs.is_empty()); // (5)
209         assert!(as_update.update_fail_htlcs.is_empty()); // (5)
210         assert!(as_update.update_fail_malformed_htlcs.is_empty()); // (5)
211         assert!(as_update.update_fee.is_none()); // (5)
212         check_added_monitors!(nodes[0], 1);
213
214         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed); // deliver (4)
215         let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
216         // only (6) so get_event_msg's assert(len == 1) passes
217         check_added_monitors!(nodes[0], 1);
218
219         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update.commitment_signed); // deliver (5)
220         let bs_second_revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
221         check_added_monitors!(nodes[1], 1);
222
223         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke);
224         check_added_monitors!(nodes[0], 1);
225
226         let events_2 = nodes[0].node.get_and_clear_pending_events();
227         assert_eq!(events_2.len(), 1);
228         match events_2[0] {
229                 Event::PendingHTLCsForwardable {..} => {}, // If we actually processed we'd receive the payment
230                 _ => panic!("Unexpected event"),
231         }
232
233         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke); // deliver (6)
234         check_added_monitors!(nodes[1], 1);
235 }
236
237 #[test]
238 fn test_update_fee_unordered_raa() {
239         // Just the intro to the previous test followed by an out-of-order RAA (which caused a
240         // crash in an earlier version of the update_fee patch)
241         let chanmon_cfgs = create_chanmon_cfgs(2);
242         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
243         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
244         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
245         let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
246         let channel_id = chan.2;
247         let logger = test_utils::TestLogger::new();
248
249         // balancing
250         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
251
252         // First nodes[0] generates an update_fee
253         nodes[0].node.update_fee(channel_id, get_feerate!(nodes[0], channel_id) + 20).unwrap();
254         check_added_monitors!(nodes[0], 1);
255
256         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
257         assert_eq!(events_0.len(), 1);
258         let update_msg = match events_0[0] { // (1)
259                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
260                         update_fee.as_ref()
261                 },
262                 _ => panic!("Unexpected event"),
263         };
264
265         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
266
267         // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
268         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[0]);
269         let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
270         nodes[1].node.send_payment(&get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 40000, TEST_FINAL_CLTV, &logger).unwrap(), our_payment_hash, &Some(our_payment_secret)).unwrap();
271         check_added_monitors!(nodes[1], 1);
272
273         let payment_event = {
274                 let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events();
275                 assert_eq!(events_1.len(), 1);
276                 SendEvent::from_event(events_1.remove(0))
277         };
278         assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
279         assert_eq!(payment_event.msgs.len(), 1);
280
281         // ...now when the messages get delivered everyone should be happy
282         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
283         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2)
284         let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
285         // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes
286         check_added_monitors!(nodes[0], 1);
287
288         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg); // deliver (2)
289         check_added_monitors!(nodes[1], 1);
290
291         // We can't continue, sadly, because our (1) now has a bogus signature
292 }
293
294 #[test]
295 fn test_multi_flight_update_fee() {
296         let chanmon_cfgs = create_chanmon_cfgs(2);
297         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
298         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
299         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
300         let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
301         let channel_id = chan.2;
302
303         // A                                        B
304         // update_fee/commitment_signed          ->
305         //                                       .- send (1) RAA and (2) commitment_signed
306         // update_fee (never committed)          ->
307         // (3) update_fee                        ->
308         // We have to manually generate the above update_fee; it is allowed by the protocol, but we
309         // don't track which updates correspond to which revoke_and_ack responses, so we're in
310         // AwaitingRAA mode and will not generate the update_fee yet.
311         //                                       <- (1) RAA delivered
312         // (3) is generated and send (4) CS      -.
313         // Note that A cannot generate (4) prior to (1) being delivered as it otherwise doesn't
314         // know the per_commitment_point to use for it.
315         //                                       <- (2) commitment_signed delivered
316         // revoke_and_ack                        ->
317         //                                          B should send no response here
318         // (4) commitment_signed delivered       ->
319         //                                       <- RAA/commitment_signed delivered
320         // revoke_and_ack                        ->
321
322         // First nodes[0] generates an update_fee
323         let initial_feerate = get_feerate!(nodes[0], channel_id);
324         nodes[0].node.update_fee(channel_id, initial_feerate + 20).unwrap();
325         check_added_monitors!(nodes[0], 1);
326
327         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
328         assert_eq!(events_0.len(), 1);
329         let (update_msg_1, commitment_signed_1) = match events_0[0] { // (1)
330                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
331                         (update_fee.as_ref().unwrap(), commitment_signed)
332                 },
333                 _ => panic!("Unexpected event"),
334         };
335
336         // Deliver first update_fee/commitment_signed pair, generating (1) and (2):
337         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg_1);
338         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed_1);
339         let (bs_revoke_msg, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
340         check_added_monitors!(nodes[1], 1);
341
342         // nodes[0] is awaiting a revoke from nodes[1] before it will create a new commitment
343         // transaction:
344         nodes[0].node.update_fee(channel_id, initial_feerate + 40).unwrap();
345         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
346         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
347
348         // Create the (3) update_fee message that nodes[0] will generate before it does...
349         let mut update_msg_2 = msgs::UpdateFee {
350                 channel_id: update_msg_1.channel_id.clone(),
351                 feerate_per_kw: (initial_feerate + 30) as u32,
352         };
353
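        // Deliver the (+30) update_fee, which will never be committed, then bump it to +40 and
        // deliver that as (3), matching the "update_fee (never committed)" step in the diagram above.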
354         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2);
355
356         update_msg_2.feerate_per_kw = (initial_feerate + 40) as u32;
357         // Deliver (3)
358         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2);
359
360         // Deliver (1), generating (3) and (4)
361         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_msg);
362         let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
363         check_added_monitors!(nodes[0], 1);
364         assert!(as_second_update.update_add_htlcs.is_empty());
365         assert!(as_second_update.update_fulfill_htlcs.is_empty());
366         assert!(as_second_update.update_fail_htlcs.is_empty());
367         assert!(as_second_update.update_fail_malformed_htlcs.is_empty());
368         // Check that the update_fee newly generated matches what we delivered:
369         assert_eq!(as_second_update.update_fee.as_ref().unwrap().channel_id, update_msg_2.channel_id);
370         assert_eq!(as_second_update.update_fee.as_ref().unwrap().feerate_per_kw, update_msg_2.feerate_per_kw);
371
372         // Deliver (2) commitment_signed
373         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
374         let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
375         check_added_monitors!(nodes[0], 1);
376         // No commitment_signed so get_event_msg's assert(len == 1) passes
377
378         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg);
379         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
380         check_added_monitors!(nodes[1], 1);
381
382         // Deliver (4)
383         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed);
384         let (bs_second_revoke, bs_second_commitment) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
385         check_added_monitors!(nodes[1], 1);
386
387         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke);
388         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
389         check_added_monitors!(nodes[0], 1);
390
391         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment);
392         let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
393         // No commitment_signed so get_event_msg's assert(len == 1) passes
394         check_added_monitors!(nodes[0], 1);
395
396         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke);
397         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
398         check_added_monitors!(nodes[1], 1);
399 }
400
401 fn do_test_1_conf_open(connect_style: ConnectStyle) {
402         // Previously, if the minimum_depth config was set to 1, we'd never send a funding_locked. This
403         // tests that we properly send one in that case.
404         let mut alice_config = UserConfig::default();
405         alice_config.own_channel_config.minimum_depth = 1;
406         alice_config.channel_options.announced_channel = true;
407         alice_config.peer_channel_config_limits.force_announced_channel_preference = false;
408         let mut bob_config = UserConfig::default();
409         bob_config.own_channel_config.minimum_depth = 1;
410         bob_config.channel_options.announced_channel = true;
411         bob_config.peer_channel_config_limits.force_announced_channel_preference = false;
412         let chanmon_cfgs = create_chanmon_cfgs(2);
413         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
414         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(alice_config), Some(bob_config)]);
415         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
416         *nodes[0].connect_style.borrow_mut() = connect_style;
417
418         let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, InitFeatures::known(), InitFeatures::known());
419         mine_transaction(&nodes[1], &tx);
420         nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id()));
421
422         mine_transaction(&nodes[0], &tx);
423         let (funding_locked, _) = create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
424         let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked);
425
426         for node in nodes {
427                 assert!(node.net_graph_msg_handler.handle_channel_announcement(&announcement).unwrap());
428                 node.net_graph_msg_handler.handle_channel_update(&as_update).unwrap();
429                 node.net_graph_msg_handler.handle_channel_update(&bs_update).unwrap();
430         }
431 }
432 #[test]
433 fn test_1_conf_open() {
434         do_test_1_conf_open(ConnectStyle::BestBlockFirst);
435         do_test_1_conf_open(ConnectStyle::TransactionsFirst);
436         do_test_1_conf_open(ConnectStyle::FullBlockViaListen);
437 }
438
439 fn do_test_sanity_on_in_flight_opens(steps: u8) {
440         // Previously, we had issues deserializing channels when we hadn't connected the first block
441         // after creation. To catch that and similar issues, we lean on the Node::drop impl to test
442         // serialization round-trips and simply do steps towards opening a channel and then drop the
443         // Node objects.
444
445         let chanmon_cfgs = create_chanmon_cfgs(2);
446         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
447         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
448         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
449
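        // The low nibble of `steps` selects how many of the channel-open steps below to perform
        // before dropping the nodes; the high bit (0b1000_0000) additionally connects an extra block
        // on both nodes before starting.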
450         if steps & 0b1000_0000 != 0 {
451                 let block = Block {
452                         header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
453                         txdata: vec![],
454                 };
455                 connect_block(&nodes[0], &block);
456                 connect_block(&nodes[1], &block);
457         }
458
459         if steps & 0x0f == 0 { return; }
460         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
461         let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
462
463         if steps & 0x0f == 1 { return; }
464         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel);
465         let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
466
467         if steps & 0x0f == 2 { return; }
468         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
469
470         let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], 100000, 42);
471
472         if steps & 0x0f == 3 { return; }
473         nodes[0].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
474         check_added_monitors!(nodes[0], 0);
475         let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
476
477         if steps & 0x0f == 4 { return; }
478         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
479         {
480                 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
481                 assert_eq!(added_monitors.len(), 1);
482                 assert_eq!(added_monitors[0].0, funding_output);
483                 added_monitors.clear();
484         }
485         let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
486
487         if steps & 0x0f == 5 { return; }
488         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
489         {
490                 let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
491                 assert_eq!(added_monitors.len(), 1);
492                 assert_eq!(added_monitors[0].0, funding_output);
493                 added_monitors.clear();
494         }
495
496         let events_4 = nodes[0].node.get_and_clear_pending_events();
497         assert_eq!(events_4.len(), 0);
498
499         if steps & 0x0f == 6 { return; }
500         create_chan_between_nodes_with_value_confirm_first(&nodes[0], &nodes[1], &tx, 2);
501
502         if steps & 0x0f == 7 { return; }
503         confirm_transaction_at(&nodes[0], &tx, 2);
504         connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
505         create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
506 }
507
508 #[test]
509 fn test_sanity_on_in_flight_opens() {
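        // Exercise every partial-open step count (0 through 8), each with and without the extra
        // pre-connected block.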
510         do_test_sanity_on_in_flight_opens(0);
511         do_test_sanity_on_in_flight_opens(0 | 0b1000_0000);
512         do_test_sanity_on_in_flight_opens(1);
513         do_test_sanity_on_in_flight_opens(1 | 0b1000_0000);
514         do_test_sanity_on_in_flight_opens(2);
515         do_test_sanity_on_in_flight_opens(2 | 0b1000_0000);
516         do_test_sanity_on_in_flight_opens(3);
517         do_test_sanity_on_in_flight_opens(3 | 0b1000_0000);
518         do_test_sanity_on_in_flight_opens(4);
519         do_test_sanity_on_in_flight_opens(4 | 0b1000_0000);
520         do_test_sanity_on_in_flight_opens(5);
521         do_test_sanity_on_in_flight_opens(5 | 0b1000_0000);
522         do_test_sanity_on_in_flight_opens(6);
523         do_test_sanity_on_in_flight_opens(6 | 0b1000_0000);
524         do_test_sanity_on_in_flight_opens(7);
525         do_test_sanity_on_in_flight_opens(7 | 0b1000_0000);
526         do_test_sanity_on_in_flight_opens(8);
527         do_test_sanity_on_in_flight_opens(8 | 0b1000_0000);
528 }
529
530 #[test]
531 fn test_update_fee_vanilla() {
532         let chanmon_cfgs = create_chanmon_cfgs(2);
533         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
534         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
535         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
536         let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
537         let channel_id = chan.2;
538
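        // nodes[0] raises the feerate, then both sides complete a full commitment_signed /
        // revoke_and_ack handshake in both directions to commit the new fee.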
539         let feerate = get_feerate!(nodes[0], channel_id);
540         nodes[0].node.update_fee(channel_id, feerate+25).unwrap();
541         check_added_monitors!(nodes[0], 1);
542
543         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
544         assert_eq!(events_0.len(), 1);
545         let (update_msg, commitment_signed) = match events_0[0] {
546                 MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs: _, update_fulfill_htlcs: _, update_fail_htlcs: _, update_fail_malformed_htlcs: _, ref update_fee, ref commitment_signed } } => {
547                         (update_fee.as_ref(), commitment_signed)
548                 },
549                 _ => panic!("Unexpected event"),
550         };
551         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
552
553         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
554         let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
555         check_added_monitors!(nodes[1], 1);
556
557         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
558         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
559         check_added_monitors!(nodes[0], 1);
560
561         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
562         let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
563         // No commitment_signed so get_event_msg's assert(len == 1) passes
564         check_added_monitors!(nodes[0], 1);
565
566         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
567         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
568         check_added_monitors!(nodes[1], 1);
569 }
570
571 #[test]
572 fn test_update_fee_that_funder_cannot_afford() {
573         let chanmon_cfgs = create_chanmon_cfgs(2);
574         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
575         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
576         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
577         let channel_value = 1888;
578         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 700000, InitFeatures::known(), InitFeatures::known());
579         let channel_id = chan.2;
580
581         let feerate = 260;
582         nodes[0].node.update_fee(channel_id, feerate).unwrap();
583         check_added_monitors!(nodes[0], 1);
584         let update_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
585
586         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg.update_fee.unwrap());
587
588         commitment_signed_dance!(nodes[1], nodes[0], update_msg.commitment_signed, false);
589
590         // Confirm that the new fee based on the last local commitment txn is what we expected based on the feerate of 260 set above.
591         // This value results in a fee that is exactly what the funder can afford (277 sat + 1000 sat channel reserve).
592         {
593                 let commitment_tx = get_local_commitment_txn!(nodes[1], channel_id)[0].clone();
594
595                 // We made sure neither party's funds are below the dust limit, so subtract the 2 non-HTLC outputs from the output count to get the HTLC count
596                 let num_htlcs = commitment_tx.output.len() - 2;
597                 let total_fee: u64 = feerate as u64 * (COMMITMENT_TX_BASE_WEIGHT + (num_htlcs as u64) * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000;
598                 let mut actual_fee = commitment_tx.output.iter().fold(0, |acc, output| acc + output.value);
599                 actual_fee = channel_value - actual_fee;
600                 assert_eq!(total_fee, actual_fee);
601         }
602
603         // Add 2 to the previous fee rate so that the final fee increases by 1 (with no HTLCs the fee is essentially
604         // fee_rate * (724/1000), so the increment of 2 * 0.724 = 1.448 is rounded back down to 1).
605         nodes[0].node.update_fee(channel_id, feerate+2).unwrap();
606         check_added_monitors!(nodes[0], 1);
607
608         let update2_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
609
610         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update2_msg.update_fee.unwrap());
611
612         // While producing the commitment_signed response after handling a received update_fee request, the
613         // check of whether the funder (who sent the update_fee request) can afford the new fee
614         // (funder_balance >= fee + channel_reserve) should produce an error.
615         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &update2_msg.commitment_signed);
616         nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Funding remote cannot afford proposed new fee".to_string(), 1);
617         check_added_monitors!(nodes[1], 1);
618         check_closed_broadcast!(nodes[1], true);
619 }
620
621 #[test]
622 fn test_update_fee_with_fundee_update_add_htlc() {
623         let chanmon_cfgs = create_chanmon_cfgs(2);
624         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
625         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
626         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
627         let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
628         let channel_id = chan.2;
629         let logger = test_utils::TestLogger::new();
630
631         // balancing
632         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
633
634         let feerate = get_feerate!(nodes[0], channel_id);
635         nodes[0].node.update_fee(channel_id, feerate+20).unwrap();
636         check_added_monitors!(nodes[0], 1);
637
638         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
639         assert_eq!(events_0.len(), 1);
640         let (update_msg, commitment_signed) = match events_0[0] {
641                 MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs: _, update_fulfill_htlcs: _, update_fail_htlcs: _, update_fail_malformed_htlcs: _, ref update_fee, ref commitment_signed } } => {
642                         (update_fee.as_ref(), commitment_signed)
643                 },
644                 _ => panic!("Unexpected event"),
645         };
646         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
647         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
648         let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
649         check_added_monitors!(nodes[1], 1);
650
651         let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[0]);
652         let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
653         let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 800000, TEST_FINAL_CLTV, &logger).unwrap();
654
655         // nothing happens since node[1] is in AwaitingRemoteRevoke
656         nodes[1].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
657         {
658                 let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
659                 assert_eq!(added_monitors.len(), 0);
660                 added_monitors.clear();
661         }
662         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
663         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
664         // node[1] has nothing to do
665
666         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
667         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
668         check_added_monitors!(nodes[0], 1);
669
670         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
671         let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
672         // No commitment_signed so get_event_msg's assert(len == 1) passes
673         check_added_monitors!(nodes[0], 1);
674         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
675         check_added_monitors!(nodes[1], 1);
676         // AwaitingRemoteRevoke ends here
677
678         let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
679         assert_eq!(commitment_update.update_add_htlcs.len(), 1);
680         assert_eq!(commitment_update.update_fulfill_htlcs.len(), 0);
681         assert_eq!(commitment_update.update_fail_htlcs.len(), 0);
682         assert_eq!(commitment_update.update_fail_malformed_htlcs.len(), 0);
683         assert_eq!(commitment_update.update_fee.is_none(), true);
684
685         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &commitment_update.update_add_htlcs[0]);
686         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed);
687         check_added_monitors!(nodes[0], 1);
688         let (revoke, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
689
690         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke);
691         check_added_monitors!(nodes[1], 1);
692         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
693
694         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed);
695         check_added_monitors!(nodes[1], 1);
696         let revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
697         // No commitment_signed so get_event_msg's assert(len == 1) passes
698
699         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke);
700         check_added_monitors!(nodes[0], 1);
701         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
702
703         expect_pending_htlcs_forwardable!(nodes[0]);
704
705         let events = nodes[0].node.get_and_clear_pending_events();
706         assert_eq!(events.len(), 1);
707         match events[0] {
708                 Event::PaymentReceived { .. } => { },
709                 _ => panic!("Unexpected event"),
710         };
711
712         claim_payment(&nodes[1], &vec!(&nodes[0])[..], our_payment_preimage);
713
714         send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
715         send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
716         close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
717 }
718
719 #[test]
720 fn test_update_fee() {
721         let chanmon_cfgs = create_chanmon_cfgs(2);
722         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
723         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
724         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
725         let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
726         let channel_id = chan.2;
727
728         // A                                        B
729         // (1) update_fee/commitment_signed      ->
730         //                                       <- (2) revoke_and_ack
731         //                                       .- send (3) commitment_signed
732         // (4) update_fee/commitment_signed      ->
733         //                                       .- send (5) revoke_and_ack (no CS as we're awaiting a revoke)
734         //                                       <- (3) commitment_signed delivered
735         // send (6) revoke_and_ack               -.
736         //                                       <- (5) deliver revoke_and_ack
737         // (6) deliver revoke_and_ack            ->
738         //                                       .- send (7) commitment_signed in response to (4)
739         //                                       <- (7) deliver commitment_signed
740         // revoke_and_ack                        ->
741
742         // Create and deliver (1)...
743         let feerate = get_feerate!(nodes[0], channel_id);
744         nodes[0].node.update_fee(channel_id, feerate+20).unwrap();
745         check_added_monitors!(nodes[0], 1);
746
747         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
748         assert_eq!(events_0.len(), 1);
749         let (update_msg, commitment_signed) = match events_0[0] {
750                 MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs: _, update_fulfill_htlcs: _, update_fail_htlcs: _, update_fail_malformed_htlcs: _, ref update_fee, ref commitment_signed } } => {
751                         (update_fee.as_ref(), commitment_signed)
752                 },
753                 _ => panic!("Unexpected event"),
754         };
755         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
756
757         // Generate (2) and (3):
758         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
759         let (revoke_msg, commitment_signed_0) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
760         check_added_monitors!(nodes[1], 1);
761
762         // Deliver (2):
763         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
764         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
765         check_added_monitors!(nodes[0], 1);
766
767         // Create and deliver (4)...
768         nodes[0].node.update_fee(channel_id, feerate+30).unwrap();
769         check_added_monitors!(nodes[0], 1);
770         let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
771         assert_eq!(events_0.len(), 1);
772         let (update_msg, commitment_signed) = match events_0[0] {
773                 MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs: _, update_fulfill_htlcs: _, update_fail_htlcs: _, update_fail_malformed_htlcs: _, ref update_fee, ref commitment_signed } } => {
774                         (update_fee.as_ref(), commitment_signed)
775                 },
776                 _ => panic!("Unexpected event"),
777         };
778
779         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
780         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
781         check_added_monitors!(nodes[1], 1);
782         // ... creating (5)
783         let revoke_msg = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
784         // No commitment_signed so get_event_msg's assert(len == 1) passes
785
786         // Handle (3), creating (6):
787         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_0);
788         check_added_monitors!(nodes[0], 1);
789         let revoke_msg_0 = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
790         // No commitment_signed so get_event_msg's assert(len == 1) passes
791
792         // Deliver (5):
793         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg);
794         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
795         check_added_monitors!(nodes[0], 1);
796
797         // Deliver (6), creating (7):
798         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg_0);
799         let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
800         assert!(commitment_update.update_add_htlcs.is_empty());
801         assert!(commitment_update.update_fulfill_htlcs.is_empty());
802         assert!(commitment_update.update_fail_htlcs.is_empty());
803         assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
804         assert!(commitment_update.update_fee.is_none());
805         check_added_monitors!(nodes[1], 1);
806
807         // Deliver (7)
808         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed);
809         check_added_monitors!(nodes[0], 1);
810         let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
811         // No commitment_signed so get_event_msg's assert(len == 1) passes
812
813         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg);
814         check_added_monitors!(nodes[1], 1);
815         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
816
817         assert_eq!(get_feerate!(nodes[0], channel_id), feerate + 30);
818         assert_eq!(get_feerate!(nodes[1], channel_id), feerate + 30);
819         close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
820 }
821
822 #[test]
823 fn pre_funding_lock_shutdown_test() {
824         // Test sending a shutdown prior to funding_locked after funding generation
825         let chanmon_cfgs = create_chanmon_cfgs(2);
826         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
827         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
828         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
829         let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 8000000, 0, InitFeatures::known(), InitFeatures::known());
830         mine_transaction(&nodes[0], &tx);
831         mine_transaction(&nodes[1], &tx);
832
833         nodes[0].node.close_channel(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).unwrap();
834         let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
835         nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
836         let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
837         nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
838
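        // Exchange closing_signed until both sides have signed off; afterwards neither node should
        // have any channels left open.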
839         let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
840         nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
841         let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
842         nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap());
843         let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
844         assert!(node_0_none.is_none());
845
846         assert!(nodes[0].node.list_channels().is_empty());
847         assert!(nodes[1].node.list_channels().is_empty());
848 }
849
850 #[test]
851 fn updates_shutdown_wait() {
852         // Test sending a shutdown with outstanding updates pending
853         let chanmon_cfgs = create_chanmon_cfgs(3);
854         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
855         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
856         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
857         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
858         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
859         let logger = test_utils::TestLogger::new();
860
861         let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000);
862
863         nodes[0].node.close_channel(&chan_1.2).unwrap();
864         let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
865         nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
866         let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
867         nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
868
869         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
870         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
871
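        // With shutdown in progress, new payment attempts over the closing channel should fail with
        // ChannelUnavailable in both directions.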
872         let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[0]);
873
874         let net_graph_msg_handler0 = &nodes[0].net_graph_msg_handler;
875         let net_graph_msg_handler1 = &nodes[1].net_graph_msg_handler;
876         let route_1 = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler0.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
877         let route_2 = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler1.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
878         unwrap_send_err!(nodes[0].node.send_payment(&route_1, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable {..}, {});
879         unwrap_send_err!(nodes[1].node.send_payment(&route_2, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable {..}, {});
880
881         assert!(nodes[2].node.claim_funds(our_payment_preimage));
882         check_added_monitors!(nodes[2], 1);
883         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
884         assert!(updates.update_add_htlcs.is_empty());
885         assert!(updates.update_fail_htlcs.is_empty());
886         assert!(updates.update_fail_malformed_htlcs.is_empty());
887         assert!(updates.update_fee.is_none());
888         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
889         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
890         check_added_monitors!(nodes[1], 1);
891         let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
892         commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
893
894         assert!(updates_2.update_add_htlcs.is_empty());
895         assert!(updates_2.update_fail_htlcs.is_empty());
896         assert!(updates_2.update_fail_malformed_htlcs.is_empty());
897         assert!(updates_2.update_fee.is_none());
898         assert_eq!(updates_2.update_fulfill_htlcs.len(), 1);
899         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]);
900         commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
901
902         let events = nodes[0].node.get_and_clear_pending_events();
903         assert_eq!(events.len(), 1);
904         match events[0] {
905                 Event::PaymentSent { ref payment_preimage } => {
906                         assert_eq!(our_payment_preimage, *payment_preimage);
907                 },
908                 _ => panic!("Unexpected event"),
909         }
910
911         let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
912         nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
913         let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
914         nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap());
915         let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
916         assert!(node_0_none.is_none());
917
918         assert!(nodes[0].node.list_channels().is_empty());
919
920         assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
921         nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
922         close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
923         assert!(nodes[1].node.list_channels().is_empty());
924         assert!(nodes[2].node.list_channels().is_empty());
925 }
926
927 #[test]
928 fn htlc_fail_async_shutdown() {
929         // Test that HTLCs fail when a channel shutdown begins, even if the shutdown and HTLC messages are delivered out-of-order
930         let chanmon_cfgs = create_chanmon_cfgs(3);
931         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
932         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
933         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
934         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
935         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
936         let logger = test_utils::TestLogger::new();
937
938         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]);
939         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
940         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
941         nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
942         check_added_monitors!(nodes[0], 1);
943         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
944         assert_eq!(updates.update_add_htlcs.len(), 1);
945         assert!(updates.update_fulfill_htlcs.is_empty());
946         assert!(updates.update_fail_htlcs.is_empty());
947         assert!(updates.update_fail_malformed_htlcs.is_empty());
948         assert!(updates.update_fee.is_none());
949
950         nodes[1].node.close_channel(&chan_1.2).unwrap();
951         let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
952         nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
953         let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
954
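             // Deliver the HTLC and its commitment_signed to nodes[1] only after the shutdown exchange
             // has started, exercising the out-of-order delivery this test is about.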
955         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
956         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed);
957         check_added_monitors!(nodes[1], 1);
958         nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
959         commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false);
960
961         let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
962         assert!(updates_2.update_add_htlcs.is_empty());
963         assert!(updates_2.update_fulfill_htlcs.is_empty());
964         assert_eq!(updates_2.update_fail_htlcs.len(), 1);
965         assert!(updates_2.update_fail_malformed_htlcs.is_empty());
966         assert!(updates_2.update_fee.is_none());
967
968         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fail_htlcs[0]);
969         commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
970
971         expect_payment_failed!(nodes[0], our_payment_hash, false);
972
973         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
974         assert_eq!(msg_events.len(), 2);
975         let node_0_closing_signed = match msg_events[0] {
976                 MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
977                         assert_eq!(*node_id, nodes[1].node.get_our_node_id());
978                         (*msg).clone()
979                 },
980                 _ => panic!("Unexpected event"),
981         };
982         match msg_events[1] {
983                 MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
984                         assert_eq!(msg.contents.short_channel_id, chan_1.0.contents.short_channel_id);
985                 },
986                 _ => panic!("Unexpected event"),
987         }
988
989         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
990         nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
991         let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
992         nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap());
993         let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
994         assert!(node_0_none.is_none());
995
996         assert!(nodes[0].node.list_channels().is_empty());
997
998         assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
999         nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
1000         close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
1001         assert!(nodes[1].node.list_channels().is_empty());
1002         assert!(nodes[2].node.list_channels().is_empty());
1003 }
1004
1005 fn do_test_shutdown_rebroadcast(recv_count: u8) {
1006         // Test that shutdown/closing_signed is re-sent on reconnect with a variable number of
1007         // messages delivered prior to disconnect
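             // recv_count controls how much of the shutdown handshake is delivered before the disconnect:
             // 0 delivers nothing, 1 delivers nodes[1]'s shutdown to nodes[0], and 2 also delivers
             // nodes[0]'s shutdown back to nodes[1] (and, later, whether the first closing_signed lands).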
1008         let chanmon_cfgs = create_chanmon_cfgs(3);
1009         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1010         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1011         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1012         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
1013         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
1014
1015         let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000);
1016
1017         nodes[1].node.close_channel(&chan_1.2).unwrap();
1018         let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
1019         if recv_count > 0 {
1020                 nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
1021                 let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
1022                 if recv_count > 1 {
1023                         nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
1024                 }
1025         }
1026
1027         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
1028         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
1029
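             // Reconnect and exchange channel_reestablish; both sides should re-send their shutdown messages.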
1030         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
1031         let node_0_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
1032         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
1033         let node_1_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
1034
1035         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_reestablish);
1036         let node_1_2nd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
1037         assert!(node_1_shutdown == node_1_2nd_shutdown);
1038
1039         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_reestablish);
1040         let node_0_2nd_shutdown = if recv_count > 0 {
1041                 let node_0_2nd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
1042                 nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_2nd_shutdown);
1043                 node_0_2nd_shutdown
1044         } else {
1045                 let node_0_chan_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
1046                 assert_eq!(node_0_chan_update.contents.flags & 2, 0); // "disabled" flag must not be set as we just reconnected.
1047                 nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_2nd_shutdown);
1048                 get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id())
1049         };
1050         nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_2nd_shutdown);
1051
1052         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1053         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1054
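             // Even with the shutdown in flight, the pending HTLC can still be claimed and the fulfill
             // relayed back across both channels before the cooperative close completes.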
1055         assert!(nodes[2].node.claim_funds(our_payment_preimage));
1056         check_added_monitors!(nodes[2], 1);
1057         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1058         assert!(updates.update_add_htlcs.is_empty());
1059         assert!(updates.update_fail_htlcs.is_empty());
1060         assert!(updates.update_fail_malformed_htlcs.is_empty());
1061         assert!(updates.update_fee.is_none());
1062         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
1063         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
1064         check_added_monitors!(nodes[1], 1);
1065         let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1066         commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
1067
1068         assert!(updates_2.update_add_htlcs.is_empty());
1069         assert!(updates_2.update_fail_htlcs.is_empty());
1070         assert!(updates_2.update_fail_malformed_htlcs.is_empty());
1071         assert!(updates_2.update_fee.is_none());
1072         assert_eq!(updates_2.update_fulfill_htlcs.len(), 1);
1073         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]);
1074         commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
1075
1076         let events = nodes[0].node.get_and_clear_pending_events();
1077         assert_eq!(events.len(), 1);
1078         match events[0] {
1079                 Event::PaymentSent { ref payment_preimage } => {
1080                         assert_eq!(our_payment_preimage, *payment_preimage);
1081                 },
1082                 _ => panic!("Unexpected event"),
1083         }
1084
1085         let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
1086         if recv_count > 0 {
1087                 nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
1088                 let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
1089                 assert!(node_1_closing_signed.is_some());
1090         }
1091
1092         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
1093         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
1094
1095         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
1096         let node_0_2nd_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
1097         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
1098         if recv_count == 0 {
1099                 // If the closing_signed wasn't delivered, we can just resume where we left off...
1100                 let node_1_2nd_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
1101
1102                 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_2nd_reestablish);
1103                 let node_0_3rd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
1104                 assert!(node_0_2nd_shutdown == node_0_3rd_shutdown);
1105
1106                 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish);
1107                 let node_1_3rd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
1108                 assert!(node_1_3rd_shutdown == node_1_2nd_shutdown);
1109
1110                 nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_3rd_shutdown);
1111                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1112
1113                 nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_3rd_shutdown);
1114                 let node_0_2nd_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
1115                 assert!(node_0_closing_signed == node_0_2nd_closing_signed);
1116
1117                 nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed);
1118                 let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
1119                 nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap());
1120                 let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
1121                 assert!(node_0_none.is_none());
1122         } else {
1123                 // If one node, however, received + responded with an identical closing_signed we end
1124                 // up erroring and node[0] will try to broadcast its own latest commitment transaction.
1125                 // There isn't really anything better we can easily do here, but in the future we might
1126                 // explore storing a set of recently-closed channels that got disconnected during
1127                 // closing_signed and avoiding broadcasting local commitment txn for some timeout to
1128                 // give our counterparty enough time to (potentially) broadcast a cooperative closing
1129                 // transaction.
1130                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1131
1132                 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish);
1133                 let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
1134                 assert_eq!(msg_events.len(), 1);
1135                 if let MessageSendEvent::HandleError { ref action, .. } = msg_events[0] {
1136                         match action {
1137                                 &ErrorAction::SendErrorMessage { ref msg } => {
1138                                         nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msg);
1139                                         assert_eq!(msg.channel_id, chan_1.2);
1140                                 },
1141                                 _ => panic!("Unexpected event!"),
1142                         }
1143                 } else { panic!("Needed SendErrorMessage close"); }
1144
1145                 // get_closing_signed_broadcast usually eats the BroadcastChannelUpdate for us and
1146                 // checks it, but in this case nodes[0] didn't ever get a chance to receive a
1147                 // closing_signed so we do it ourselves
1148                 check_closed_broadcast!(nodes[0], false);
1149                 check_added_monitors!(nodes[0], 1);
1150         }
1151
1152         assert!(nodes[0].node.list_channels().is_empty());
1153
1154         assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
1155         nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
1156         close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
1157         assert!(nodes[1].node.list_channels().is_empty());
1158         assert!(nodes[2].node.list_channels().is_empty());
1159 }
1160
1161 #[test]
1162 fn test_shutdown_rebroadcast() {
1163         do_test_shutdown_rebroadcast(0);
1164         do_test_shutdown_rebroadcast(1);
1165         do_test_shutdown_rebroadcast(2);
1166 }
1167
1168 #[test]
1169 fn fake_network_test() {
1170         // Simple test which builds a network of ChannelManagers, connects them to each other, and
1171         // tests that payments get routed and transactions broadcast in semi-reasonable ways.
1172         let chanmon_cfgs = create_chanmon_cfgs(4);
1173         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
1174         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
1175         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
1176
1177         // Create some initial channels
1178         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
1179         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
1180         let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known());
1181
1182         // Rebalance the network a bit by relaying one payment through all the channels...
1183         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
1184         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
1185         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
1186         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
1187
1188         // Send some more payments
1189         send_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 1000000);
1190         send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1], &nodes[0])[..], 1000000);
1191         send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1])[..], 1000000);
1192
1193         // Test failure packets
1194         let payment_hash_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 1000000).1;
1195         fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], payment_hash_1);
1196
1197         // Add a new channel between nodes[1] and nodes[3], skipping nodes[2]
1198         let chan_4 = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known());
1199
1200         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 1000000);
1201         send_payment(&nodes[2], &vec!(&nodes[3])[..], 1000000);
1202         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1203         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1204         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1205         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1206         send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
1207
1208         // Do some rebalance loop payments, simultaneously
1209         let mut hops = Vec::with_capacity(3);
1210         hops.push(RouteHop {
1211                 pubkey: nodes[2].node.get_our_node_id(),
1212                 node_features: NodeFeatures::empty(),
1213                 short_channel_id: chan_2.0.contents.short_channel_id,
1214                 channel_features: ChannelFeatures::empty(),
1215                 fee_msat: 0,
1216                 cltv_expiry_delta: chan_3.0.contents.cltv_expiry_delta as u32
1217         });
1218         hops.push(RouteHop {
1219                 pubkey: nodes[3].node.get_our_node_id(),
1220                 node_features: NodeFeatures::empty(),
1221                 short_channel_id: chan_3.0.contents.short_channel_id,
1222                 channel_features: ChannelFeatures::empty(),
1223                 fee_msat: 0,
1224                 cltv_expiry_delta: chan_4.1.contents.cltv_expiry_delta as u32
1225         });
1226         hops.push(RouteHop {
1227                 pubkey: nodes[1].node.get_our_node_id(),
1228                 node_features: NodeFeatures::known(),
1229                 short_channel_id: chan_4.0.contents.short_channel_id,
1230                 channel_features: ChannelFeatures::known(),
1231                 fee_msat: 1000000,
1232                 cltv_expiry_delta: TEST_FINAL_CLTV,
1233         });
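             // Work backwards from the final hop to fill in each intermediate hop's forwarding fee
             // (the next channel's base fee plus its proportional fee on the amount forwarded onward).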
1234         hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
1235         hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
1236         let payment_preimage_1 = send_along_route(&nodes[1], Route { paths: vec![hops] }, &vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0;
1237
1238         let mut hops = Vec::with_capacity(3);
1239         hops.push(RouteHop {
1240                 pubkey: nodes[3].node.get_our_node_id(),
1241                 node_features: NodeFeatures::empty(),
1242                 short_channel_id: chan_4.0.contents.short_channel_id,
1243                 channel_features: ChannelFeatures::empty(),
1244                 fee_msat: 0,
1245                 cltv_expiry_delta: chan_3.1.contents.cltv_expiry_delta as u32
1246         });
1247         hops.push(RouteHop {
1248                 pubkey: nodes[2].node.get_our_node_id(),
1249                 node_features: NodeFeatures::empty(),
1250                 short_channel_id: chan_3.0.contents.short_channel_id,
1251                 channel_features: ChannelFeatures::empty(),
1252                 fee_msat: 0,
1253                 cltv_expiry_delta: chan_2.1.contents.cltv_expiry_delta as u32
1254         });
1255         hops.push(RouteHop {
1256                 pubkey: nodes[1].node.get_our_node_id(),
1257                 node_features: NodeFeatures::known(),
1258                 short_channel_id: chan_2.0.contents.short_channel_id,
1259                 channel_features: ChannelFeatures::known(),
1260                 fee_msat: 1000000,
1261                 cltv_expiry_delta: TEST_FINAL_CLTV,
1262         });
1263         hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
1264         hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
1265         let payment_hash_2 = send_along_route(&nodes[1], Route { paths: vec![hops] }, &vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1;
1266
1267         // Claim the rebalances...
1268         fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2);
1269         claim_payment(&nodes[1], &vec!(&nodes[2], &nodes[3], &nodes[1])[..], payment_preimage_1);
1270
1271         // Add a duplicate channel between nodes[1] and nodes[3]
1272         let chan_5 = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known());
1273
1274         // Send some payments across both channels
1275         let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
1276         let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
1277         let payment_preimage_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
1278
1279
1280         route_over_limit(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000);
1281         let events = nodes[0].node.get_and_clear_pending_msg_events();
1282         assert_eq!(events.len(), 0);
1283         nodes[0].logger.assert_log_regex("lightning::ln::channelmanager".to_string(), regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap(), 1);
1284
1285         //TODO: Test that routes work again here as we've been notified that the channel is full
1286
1287         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_3);
1288         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_4);
1289         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_5);
1290
1291         // Close down the channels...
1292         close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
1293         close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
1294         close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
1295         close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
1296         close_channel(&nodes[1], &nodes[3], &chan_5.2, chan_5.3, false);
1297 }
1298
1299 #[test]
1300 fn holding_cell_htlc_counting() {
1301         // Tests that HTLCs in the holding cell count towards the pending HTLC limits on outbound HTLCs
1302         // to ensure we don't end up with HTLCs sitting around in our holding cell for several
1303         // commitment dance rounds.
1304         let chanmon_cfgs = create_chanmon_cfgs(3);
1305         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1306         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1307         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1308         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
1309         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
1310         let logger = test_utils::TestLogger::new();
1311
1312         let mut payments = Vec::new();
1313         for _ in 0..::ln::channel::OUR_MAX_HTLCS {
1314                 let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]);
1315                 let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
1316                 let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100000, TEST_FINAL_CLTV, &logger).unwrap();
1317                 nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
1318                 payments.push((payment_preimage, payment_hash));
1319         }
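             // Only the first HTLC is sent immediately; the rest land in the holding cell awaiting
             // nodes[2]'s RAA, hence the single monitor update and single SendEvent below.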
1320         check_added_monitors!(nodes[1], 1);
1321
1322         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
1323         assert_eq!(events.len(), 1);
1324         let initial_payment_event = SendEvent::from_event(events.pop().unwrap());
1325         assert_eq!(initial_payment_event.node_id, nodes[2].node.get_our_node_id());
1326
1327         // There is now one HTLC in an outbound commitment transaction and (OUR_MAX_HTLCS - 1) HTLCs in
1328         // the holding cell waiting on B's RAA to send. At this point we should not be able to add
1329         // another HTLC.
1330         let (_, payment_hash_1, payment_secret_1) = get_payment_preimage_hash!(nodes[2]);
1331         {
1332                 let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
1333                 let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100000, TEST_FINAL_CLTV, &logger).unwrap();
1334                 unwrap_send_err!(nodes[1].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)), true, APIError::ChannelUnavailable { ref err },
1335                         assert!(regex::Regex::new(r"Cannot push more than their max accepted HTLCs \(\d+\)").unwrap().is_match(err)));
1336                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1337                 nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot push more than their max accepted HTLCs".to_string(), 1);
1338         }
1339
1340         // This should also be true if we try to forward a payment.
1341         let (_, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[2]);
1342         {
1343                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
1344                 let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100000, TEST_FINAL_CLTV, &logger).unwrap();
1345                 nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
1346                 check_added_monitors!(nodes[0], 1);
1347         }
1348
1349         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1350         assert_eq!(events.len(), 1);
1351         let payment_event = SendEvent::from_event(events.pop().unwrap());
1352         assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
1353
1354         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1355         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
1356         // We have to forward pending HTLCs twice - the first pass attempts to forward the payment
1357         // (and fails), the second processes the resulting failure and fails the HTLC backwards.
1358         expect_pending_htlcs_forwardable!(nodes[1]);
1359         expect_pending_htlcs_forwardable!(nodes[1]);
1360         check_added_monitors!(nodes[1], 1);
1361
1362         let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1363         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_updates.update_fail_htlcs[0]);
1364         commitment_signed_dance!(nodes[0], nodes[1], bs_fail_updates.commitment_signed, false, true);
1365
1366         expect_payment_failure_chan_update!(nodes[0], chan_2.0.contents.short_channel_id, false);
1367         expect_payment_failed!(nodes[0], payment_hash_2, false);
1368
1369         // Now forward all the pending HTLCs and claim them back
1370         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &initial_payment_event.msgs[0]);
1371         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &initial_payment_event.commitment_msg);
1372         check_added_monitors!(nodes[2], 1);
1373
1374         let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1375         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
1376         check_added_monitors!(nodes[1], 1);
1377         let as_updates = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
1378
1379         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_commitment_signed);
1380         check_added_monitors!(nodes[1], 1);
1381         let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
1382
1383         for ref update in as_updates.update_add_htlcs.iter() {
1384                 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), update);
1385         }
1386         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_updates.commitment_signed);
1387         check_added_monitors!(nodes[2], 1);
1388         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
1389         check_added_monitors!(nodes[2], 1);
1390         let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1391
1392         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
1393         check_added_monitors!(nodes[1], 1);
1394         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_commitment_signed);
1395         check_added_monitors!(nodes[1], 1);
1396         let as_final_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
1397
1398         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_final_raa);
1399         check_added_monitors!(nodes[2], 1);
1400
1401         expect_pending_htlcs_forwardable!(nodes[2]);
1402
1403         let events = nodes[2].node.get_and_clear_pending_events();
1404         assert_eq!(events.len(), payments.len());
1405         for (event, &(_, ref hash)) in events.iter().zip(payments.iter()) {
1406                 match event {
1407                         &Event::PaymentReceived { ref payment_hash, .. } => {
1408                                 assert_eq!(*payment_hash, *hash);
1409                         },
1410                         _ => panic!("Unexpected event"),
1411                 };
1412         }
1413
1414         for (preimage, _) in payments.drain(..) {
1415                 claim_payment(&nodes[1], &[&nodes[2]], preimage);
1416         }
1417
1418         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
1419 }
1420
1421 #[test]
1422 fn duplicate_htlc_test() {
1423         // Test that we accept duplicate payment_hash HTLCs across the network and that
1424         // claiming/failing any one of them is handled separately and doesn't affect the others
1425         let chanmon_cfgs = create_chanmon_cfgs(6);
1426         let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
1427         let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs, &[None, None, None, None, None, None]);
1428         let mut nodes = create_network(6, &node_cfgs, &node_chanmgrs);
1429
1430         // Create some initial channels so nodes 0/1/2 can route via node 3 to nodes 4/5
1431         create_announced_chan_between_nodes(&nodes, 0, 3, InitFeatures::known(), InitFeatures::known());
1432         create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known());
1433         create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known());
1434         create_announced_chan_between_nodes(&nodes, 3, 4, InitFeatures::known(), InitFeatures::known());
1435         create_announced_chan_between_nodes(&nodes, 3, 5, InitFeatures::known(), InitFeatures::known());
1436
1437         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], 1000000);
1438
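             // Wind the deterministic payment counter back so the next route_payment re-uses the same
             // payment preimage/hash (the assert_eq! below relies on this).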
1439         *nodes[0].network_payment_count.borrow_mut() -= 1;
1440         assert_eq!(route_payment(&nodes[1], &vec!(&nodes[3])[..], 1000000).0, payment_preimage);
1441
1442         *nodes[0].network_payment_count.borrow_mut() -= 1;
1443         assert_eq!(route_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], 1000000).0, payment_preimage);
1444
1445         claim_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], payment_preimage);
1446         fail_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], payment_hash);
1447         claim_payment(&nodes[1], &vec!(&nodes[3])[..], payment_preimage);
1448 }
1449
1450 #[test]
1451 fn test_duplicate_htlc_different_direction_onchain() {
1452         // Test that ChannelMonitor doesn't generate 2 preimage txn
1453         // when we have 2 HTLCs with the same preimage that go across a node
1454         // in opposite directions, even with the same payment secret.
1455         let chanmon_cfgs = create_chanmon_cfgs(2);
1456         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1457         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1458         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1459
1460         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
1461         let logger = test_utils::TestLogger::new();
1462
1463         // Rebalance the channel a bit by sending a payment from nodes[0] to nodes[1]
1464         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
1465
1466         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 900_000);
1467
1468         let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
1469         let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 800_000, TEST_FINAL_CLTV, &logger).unwrap();
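             // Register the same payment_hash with nodes[0] so it will accept an inbound payment for it,
             // giving us two HTLCs with the same preimage flowing through the channel in opposite directions.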
1470         let node_a_payment_secret = nodes[0].node.create_inbound_payment_for_hash(payment_hash, None, 7200, 0).unwrap();
1471         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[0]]], 800_000, payment_hash, node_a_payment_secret);
1472
1473         // Provide preimage to node 0 by claiming payment
1474         nodes[0].node.claim_funds(payment_preimage);
1475         check_added_monitors!(nodes[0], 1);
1476
1477         // Fetch nodes[1]'s commitment txn, which we will confirm on-chain below
1478         let remote_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
1479
1480         assert_eq!(remote_txn[0].output.len(), 4); // 1 local, 1 remote, 1 htlc inbound, 1 htlc outbound
1481         let mut has_both_htlcs = 0; // check htlcs match ones committed
1482         for outp in remote_txn[0].output.iter() {
1483                 if outp.value == 800_000 / 1000 {
1484                         has_both_htlcs += 1;
1485                 } else if outp.value == 900_000 / 1000 {
1486                         has_both_htlcs += 1;
1487                 }
1488         }
1489         assert_eq!(has_both_htlcs, 2);
1490
1491         mine_transaction(&nodes[0], &remote_txn[0]);
1492         check_added_monitors!(nodes[0], 1);
1493         connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
1494
1495         // Check we only broadcast 1 timeout tx
1496         let claim_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
1497         assert_eq!(claim_txn.len(), 8);
1498         assert_eq!(claim_txn[1], claim_txn[4]);
1499         assert_eq!(claim_txn[2], claim_txn[5]);
1500         check_spends!(claim_txn[1], chan_1.3);
1501         check_spends!(claim_txn[2], claim_txn[1]);
1502         check_spends!(claim_txn[7], claim_txn[1]);
1503
1504         assert_eq!(claim_txn[0].input.len(), 1);
1505         assert_eq!(claim_txn[3].input.len(), 1);
1506         assert_eq!(claim_txn[0].input[0].previous_output, claim_txn[3].input[0].previous_output);
1507
1508         assert_eq!(claim_txn[0].input.len(), 1);
1509         assert_eq!(claim_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC 1 <--> 0, preimage tx
1510         check_spends!(claim_txn[0], remote_txn[0]);
1511         assert_eq!(remote_txn[0].output[claim_txn[0].input[0].previous_output.vout as usize].value, 800);
1512         assert_eq!(claim_txn[6].input.len(), 1);
1513         assert_eq!(claim_txn[6].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // HTLC 0 <--> 1, timeout tx
1514         check_spends!(claim_txn[6], remote_txn[0]);
1515         assert_eq!(remote_txn[0].output[claim_txn[6].input[0].previous_output.vout as usize].value, 900);
1516
1517         let events = nodes[0].node.get_and_clear_pending_msg_events();
1518         assert_eq!(events.len(), 3);
1519         for e in events {
1520                 match e {
1521                         MessageSendEvent::BroadcastChannelUpdate { .. } => {},
1522                         MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
1523                                 assert_eq!(node_id, nodes[1].node.get_our_node_id());
1524                                 assert_eq!(msg.data, "Commitment or closing transaction was confirmed on chain.");
1525                         },
1526                         MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
1527                                 assert!(update_add_htlcs.is_empty());
1528                                 assert!(update_fail_htlcs.is_empty());
1529                                 assert_eq!(update_fulfill_htlcs.len(), 1);
1530                                 assert!(update_fail_malformed_htlcs.is_empty());
1531                                 assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
1532                         },
1533                         _ => panic!("Unexpected event"),
1534                 }
1535         }
1536 }
1537
1538 #[test]
1539 fn test_basic_channel_reserve() {
1540         let chanmon_cfgs = create_chanmon_cfgs(2);
1541         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1542         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1543         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1544         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
1545         let logger = test_utils::TestLogger::new();
1546
1547         let chan_stat = get_channel_value_stat!(nodes[0], chan.2);
1548         let channel_reserve = chan_stat.channel_reserve_msat;
1549
1550         // The 2* and +1 are for the fee spike reserve.
1551         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
1552         let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], chan.2), 1 + 1);
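             // Our outbound balance is 5_000_000 msat (the 100_000 sat channel minus the 95_000_000 msat
             // pushed to nodes[1]), so sending more than max_can_send must trip the reserve check below.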
1553         let max_can_send = 5000000 - channel_reserve - commit_tx_fee;
1554         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
1555         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), max_can_send + 1, TEST_FINAL_CLTV, &logger).unwrap();
1556         let err = nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).err().unwrap();
1557         match err {
1558                 PaymentSendFailure::AllFailedRetrySafe(ref fails) => {
1559                         match &fails[0] {
1560                                 &APIError::ChannelUnavailable{ref err} =>
1561                                         assert!(regex::Regex::new(r"Cannot send value that would put our balance under counterparty-announced channel reserve value \(\d+\)").unwrap().is_match(err)),
1562                                 _ => panic!("Unexpected error variant"),
1563                         }
1564                 },
1565                 _ => panic!("Unexpected error variant"),
1566         }
1567         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1568         nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send value that would put our balance under counterparty-announced channel reserve value".to_string(), 1);
1569
1570         send_payment(&nodes[0], &vec![&nodes[1]], max_can_send);
1571 }
1572
1573 #[test]
1574 fn test_fee_spike_violation_fails_htlc() {
1575         let chanmon_cfgs = create_chanmon_cfgs(2);
1576         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1577         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1578         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1579         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
1580
1581         let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 3460001);
1582         // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
1583         let secp_ctx = Secp256k1::new();
1584         let session_priv = SecretKey::from_slice(&[42; 32]).expect("RNG is bad!");
1585
1586         let cur_height = nodes[1].node.best_block.read().unwrap().height() + 1;
1587
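             // Build the onion by hand: derive the per-hop keys from the session key, construct the
             // per-hop payloads for the route, then assemble the onion packet for the HTLC.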
1588         let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
1589         let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 3460001, &Some(payment_secret), cur_height, &None).unwrap();
1590         let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash);
1591         let msg = msgs::UpdateAddHTLC {
1592                 channel_id: chan.2,
1593                 htlc_id: 0,
1594                 amount_msat: htlc_msat,
1595                 payment_hash: payment_hash,
1596                 cltv_expiry: htlc_cltv,
1597                 onion_routing_packet: onion_packet,
1598         };
1599
1600         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
1601
1602         // Now manually create the commitment_signed message corresponding to the update_add
1603         // nodes[0] just sent. In the code for construction of this message, "local" refers
1604         // to the sender of the message, and "remote" refers to the receiver.
1605
1606         let feerate_per_kw = get_feerate!(nodes[0], chan.2);
1607
1608         const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
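             // Commitment numbers count down from 2^48 - 1, so this is the very first commitment.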
1609
1610         // Get the EnforcingSigner for each channel, which will be used to (1) get the keys
1611         // needed to sign the new commitment tx and (2) sign the new commitment tx.
1612         let (local_revocation_basepoint, local_htlc_basepoint, local_secret, next_local_point) = {
1613                 let chan_lock = nodes[0].node.channel_state.lock().unwrap();
1614                 let local_chan = chan_lock.by_id.get(&chan.2).unwrap();
1615                 let chan_signer = local_chan.get_signer();
1616                 let pubkeys = chan_signer.pubkeys();
1617                 (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
1618                  chan_signer.release_commitment_secret(INITIAL_COMMITMENT_NUMBER),
1619                  chan_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx))
1620         };
1621         let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point) = {
1622                 let chan_lock = nodes[1].node.channel_state.lock().unwrap();
1623                 let remote_chan = chan_lock.by_id.get(&chan.2).unwrap();
1624                 let chan_signer = remote_chan.get_signer();
1625                 let pubkeys = chan_signer.pubkeys();
1626                 (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
1627                  chan_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx))
1628         };
1629
1630         // Assemble the set of keys we can use for signatures for our commitment_signed message.
1631         let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &remote_point, &remote_delayed_payment_basepoint,
1632                 &remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint).unwrap();
1633
1634         // Build the remote commitment transaction so we can sign it, and then later use the
1635         // signature for the commitment_signed message.
1636         let local_chan_balance = 1313;
1637
1638         let accepted_htlc_info = chan_utils::HTLCOutputInCommitment {
1639                 offered: false,
1640                 amount_msat: 3460001,
1641                 cltv_expiry: htlc_cltv,
1642                 payment_hash,
1643                 transaction_output_index: Some(1),
1644         };
1645
1646         let commitment_number = INITIAL_COMMITMENT_NUMBER - 1;
1647
1648         let res = {
1649                 let local_chan_lock = nodes[0].node.channel_state.lock().unwrap();
1650                 let local_chan = local_chan_lock.by_id.get(&chan.2).unwrap();
1651                 let local_chan_signer = local_chan.get_signer();
1652                 let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
1653                         commitment_number,
1654                         95000,
1655                         local_chan_balance,
1656                         commit_tx_keys.clone(),
1657                         feerate_per_kw,
1658                         &mut vec![(accepted_htlc_info, ())],
1659                         &local_chan.channel_transaction_parameters.as_counterparty_broadcastable()
1660                 );
1661                 local_chan_signer.sign_counterparty_commitment(&commitment_tx, &secp_ctx).unwrap()
1662         };
1663
1664         let commit_signed_msg = msgs::CommitmentSigned {
1665                 channel_id: chan.2,
1666                 signature: res.0,
1667                 htlc_signatures: res.1
1668         };
1669
1670         // Send the commitment_signed message to nodes[1].
1671         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg);
1672         let _ = nodes[1].node.get_and_clear_pending_msg_events();
1673
1674         // Send the RAA to nodes[1].
1675         let raa_msg = msgs::RevokeAndACK {
1676                 channel_id: chan.2,
1677                 per_commitment_secret: local_secret,
1678                 next_per_commitment_point: next_local_point
1679         };
1680         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa_msg);
1681
1682         let events = nodes[1].node.get_and_clear_pending_msg_events();
1683         assert_eq!(events.len(), 1);
1684         // Make sure the HTLC failed in the way we expect.
1685         match events[0] {
1686                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fail_htlcs, .. }, .. } => {
1687                         assert_eq!(update_fail_htlcs.len(), 1);
1688                         update_fail_htlcs[0].clone()
1689                 },
1690                 _ => panic!("Unexpected event"),
1691         };
1692         nodes[1].logger.assert_log("lightning::ln::channel".to_string(),
1693                 format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", ::hex::encode(raa_msg.channel_id)), 1);
1694
1695         check_added_monitors!(nodes[1], 2);
1696 }
1697
1698 #[test]
1699 fn test_chan_reserve_violation_outbound_htlc_inbound_chan() {
1700         let mut chanmon_cfgs = create_chanmon_cfgs(2);
1701         // Set the fee rate for the channel very high, to the point where the fundee
1702         // sending any above-dust amount would result in a channel reserve violation.
1703         // In this test we check that we would be prevented from sending an HTLC in
1704         // this situation.
1705         let feerate_per_kw = 253;
1706         chanmon_cfgs[0].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(feerate_per_kw) };
1707         chanmon_cfgs[1].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(feerate_per_kw) };
1708         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1709         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1710         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1711
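             // Push everything to nodes[1] except nodes[0]'s reserve plus the commitment fee for a single
             // pending HTLC, so a second above-dust HTLC from nodes[1] would push nodes[0] (who pays the
             // commitment fee) below its reserve.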
1712         let mut push_amt = 100_000_000;
1713         push_amt -= feerate_per_kw as u64 * (COMMITMENT_TX_BASE_WEIGHT + COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000;
1714         push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000) * 1000;
1715
1716         let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt, InitFeatures::known(), InitFeatures::known());
1717
1718         // Sending exactly enough to hit the reserve amount should be accepted
1719         let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
1720
1721         // However, one more HTLC should put nodes[0] under its reserve and thus fail.
1722         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1_000_000);
1723         unwrap_send_err!(nodes[1].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err },
1724                 assert_eq!(err, "Cannot send value that would put counterparty balance under holder-announced channel reserve value"));
1725         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1726         nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot send value that would put counterparty balance under holder-announced channel reserve value".to_string(), 1);
1727 }
1728
1729 #[test]
1730 fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
1731         let mut chanmon_cfgs = create_chanmon_cfgs(2);
1732         // Set the fee rate for the channel very high, to the point where the funder
1733         // receiving 1 update_add_htlc would result in them closing the channel due
1734         // to channel reserve violation. This close could also happen if the fee went
1735         // up a more realistic amount, but many HTLCs were outstanding at the time of
1736         // the update_add_htlc.
1737         chanmon_cfgs[0].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(6000) };
1738         chanmon_cfgs[1].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(6000) };
1739         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1740         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1741         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1742         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
1743
1744         let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1000);
1745         // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
1746         let secp_ctx = Secp256k1::new();
1747         let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
1748         let cur_height = nodes[1].node.best_block.read().unwrap().height() + 1;
1749         let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
1750         let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 1000, &Some(payment_secret), cur_height, &None).unwrap();
1751         let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash);
1752         let msg = msgs::UpdateAddHTLC {
1753                 channel_id: chan.2,
1754                 htlc_id: 1,
1755                 amount_msat: htlc_msat + 1,
1756                 payment_hash: payment_hash,
1757                 cltv_expiry: htlc_cltv,
1758                 onion_routing_packet: onion_packet,
1759         };
1760
1761         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &msg);
1762         // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
1763         nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string(), 1);
1764         assert_eq!(nodes[0].node.list_channels().len(), 0);
1765         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
1766         assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value");
1767         check_added_monitors!(nodes[0], 1);
1768 }
1769
1770 #[test]
1771 fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() {
1772         // Test that if we receive many dust HTLCs over an outbound channel, they don't count when
1773         // calculating our commitment transaction fee (this was previously broken).
1774         let mut chanmon_cfgs = create_chanmon_cfgs(2);
1775         let feerate_per_kw = 253;
1776         chanmon_cfgs[0].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(feerate_per_kw) };
1777         chanmon_cfgs[1].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(feerate_per_kw) };
1778
1779         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1780         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1781         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1782
1783         // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
1784         // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment
1785         // transaction fee with 0 HTLCs (183 sats)).
1786         let mut push_amt = 100_000_000;
1787         push_amt -= feerate_per_kw as u64 * (COMMITMENT_TX_BASE_WEIGHT) / 1000 * 1000;
1788         push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000) * 1000;
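             // A hedged worked example of push_amt (assuming COMMITMENT_TX_BASE_WEIGHT = 724 and a
             // 1%-of-channel-value selected reserve, matching the 183 sat / 1000 sat figures above):
             //   0-HTLC commit fee = 253 * 724 / 1000 = 183 sat, selected reserve = 100_000 / 100 = 1_000 sat,
             // so push_amt = 100_000_000 - 183_000 - 1_000_000 msat, leaving nodes[0] with exactly
             // reserve + 0-HTLC commit fee.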
1789         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt, InitFeatures::known(), InitFeatures::known());
1790
1791         let dust_amt = crate::ln::channel::MIN_DUST_LIMIT_SATOSHIS * 1000
1792                 + feerate_per_kw as u64 * HTLC_SUCCESS_TX_WEIGHT / 1000 * 1000 - 1;
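             // A sketch of the dust rule as applied here: an HTLC we receive is dust on our commitment tx
             // if its value, minus the fee of the HTLC-Success tx that would claim it, is below our dust
             // limit. The cutoff is therefore MIN_DUST_LIMIT_SATOSHIS * 1000 plus the HTLC-Success fee (in
             // msat), and the trailing `- 1` keeps dust_amt just below that cutoff.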
1793         // In the previous code, routing this dust payment would cause nodes[0] to perceive a channel
1794         // reserve violation even though it's a dust HTLC and therefore shouldn't count towards the
1795         // commitment transaction fee.
1796         let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], dust_amt);
1797
1798         // One more than the dust amt should fail, however.
1799         let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], dust_amt + 1);
1800         unwrap_send_err!(nodes[1].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err },
1801                 assert_eq!(err, "Cannot send value that would put counterparty balance under holder-announced channel reserve value"));
1802 }
1803
1804 #[test]
1805 fn test_chan_reserve_dust_inbound_htlcs_inbound_chan() {
1806         // Test that if we receive many dust HTLCs over an inbound channel, they don't count when
1807         // calculating our counterparty's commitment transaction fee (this was previously broken).
1808         let chanmon_cfgs = create_chanmon_cfgs(2);
1809         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1810         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1811         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1812         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 98000000, InitFeatures::known(), InitFeatures::known());
1813
1814         let payment_amt = 46000; // Dust amount
1815         // In the previous code, these first four payments would succeed.
1816         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1817         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1818         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1819         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1820
1821         // Then these next 5 would be interpreted by nodes[1] as violating the fee spike buffer.
1822         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1823         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1824         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1825         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1826         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1827
1828         // And this last payment previously resulted in nodes[1] closing on its inbound-channel
1829         // counterparty, because it counted all the previous dust HTLCs against nodes[0]'s commitment
1830         // transaction fee and therefore perceived this next payment as a channel reserve violation.
1831         let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
1832 }
1833
1834 #[test]
1835 fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
1836         let chanmon_cfgs = create_chanmon_cfgs(3);
1837         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1838         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1839         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1840         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
1841         let _ = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
1842
1843         let feemsat = 239;
1844         let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat;
1845         let chan_stat = get_channel_value_stat!(nodes[0], chan.2);
1846         let feerate = get_feerate!(nodes[0], chan.2);
1847
1848         // The 2* and the extra +1 HTLC account for the fee spike reserve.
1849         let commit_tx_fee_2_htlc = 2*commit_tx_fee_msat(feerate, 2 + 1);
1850         let recv_value_1 = (chan_stat.value_to_self_msat - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlc)/2;
1851         let amt_msat_1 = recv_value_1 + total_routing_fee_msat;
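             // Sizing sketch (not exact to the msat): recv_value_1 is roughly half of what nodes[0] can
             // spend once the reserve, the routing fee, and the doubled 3-HTLC commit fee (the fee spike
             // buffer) are set aside, so a second, similarly sized HTLC plus one extra msat (built by hand
             // below) is just enough to cross the reserve.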
1852
1853         // Add a pending HTLC.
1854         let (route_1, our_payment_hash_1, _, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat_1);
1855         let payment_event_1 = {
1856                 nodes[0].node.send_payment(&route_1, our_payment_hash_1, &Some(our_payment_secret_1)).unwrap();
1857                 check_added_monitors!(nodes[0], 1);
1858
1859                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1860                 assert_eq!(events.len(), 1);
1861                 SendEvent::from_event(events.remove(0))
1862         };
1863         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]);
1864
1865         // Attempt to trigger a channel reserve violation --> payment failure.
1866         let commit_tx_fee_2_htlcs = commit_tx_fee_msat(feerate, 2);
1867         let recv_value_2 = chan_stat.value_to_self_msat - amt_msat_1 - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlcs + 1;
1868         let amt_msat_2 = recv_value_2 + total_routing_fee_msat;
1869         let (route_2, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat_2);
1870
1871         // Need to manually create the update_add_htlc message to bypass the channel reserve check in send_htlc()
1872         let secp_ctx = Secp256k1::new();
1873         let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
1874         let cur_height = nodes[0].node.best_block.read().unwrap().height() + 1;
1875         let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route_2.paths[0], &session_priv).unwrap();
1876         let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route_2.paths[0], recv_value_2, &None, cur_height, &None).unwrap();
1877         let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash_1);
1878         let msg = msgs::UpdateAddHTLC {
1879                 channel_id: chan.2,
1880                 htlc_id: 1,
1881                 amount_msat: htlc_msat + 1,
1882                 payment_hash: our_payment_hash_1,
1883                 cltv_expiry: htlc_cltv,
1884                 onion_routing_packet: onion_packet,
1885         };
1886
1887         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
1888         // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
1889         nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote HTLC add would put them under remote reserve value".to_string(), 1);
1890         assert_eq!(nodes[1].node.list_channels().len(), 1);
1891         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
1892         assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
1893         check_added_monitors!(nodes[1], 1);
1894 }
1895
1896 #[test]
1897 fn test_inbound_outbound_capacity_is_not_zero() {
1898         let chanmon_cfgs = create_chanmon_cfgs(2);
1899         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1900         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1901         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1902         let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
1903         let channels0 = node_chanmgrs[0].list_channels();
1904         let channels1 = node_chanmgrs[1].list_channels();
1905         assert_eq!(channels0.len(), 1);
1906         assert_eq!(channels1.len(), 1);
1907
1908         let reserve = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100000);
1909         assert_eq!(channels0[0].inbound_capacity_msat, 95000000 - reserve*1000);
1910         assert_eq!(channels1[0].outbound_capacity_msat, 95000000 - reserve*1000);
1911
1912         assert_eq!(channels0[0].outbound_capacity_msat, 100000 * 1000 - 95000000 - reserve*1000);
1913         assert_eq!(channels1[0].inbound_capacity_msat, 100000 * 1000 - 95000000 - reserve*1000);
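             // Quick arithmetic check of the asserts above: nodes[0] funded 100_000 sat and pushed
             // 95_000_000 msat, so nodes[0]'s side holds 100_000 * 1000 - 95_000_000 = 5_000_000 msat and
             // nodes[1]'s side holds 95_000_000 msat; each side's usable capacity is its balance minus the
             // channel reserve it must maintain, hence the `- reserve * 1000` terms.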
1914 }
1915
1916 fn commit_tx_fee_msat(feerate: u32, num_htlcs: u64) -> u64 {
1917         (COMMITMENT_TX_BASE_WEIGHT + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate as u64 / 1000 * 1000
1918 }
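     // For example (assuming, per BOLT 3, COMMITMENT_TX_BASE_WEIGHT = 724 and
     // COMMITMENT_TX_WEIGHT_PER_HTLC = 172): commit_tx_fee_msat(253, 2)
     //   = (724 + 2 * 172) * 253 / 1000 * 1000 = 270_000 msat,
     // where the trailing `/ 1000 * 1000` rounds the fee down to a whole number of satoshis.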
1919
1920 #[test]
1921 fn test_channel_reserve_holding_cell_htlcs() {
1922         let chanmon_cfgs = create_chanmon_cfgs(3);
1923         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1924         // When this test was written, the default base fee floated based on the HTLC count.
1925         // It is now fixed, so we simply set the fee to the expected value here.
1926         let mut config = test_default_channel_config();
1927         config.channel_options.forwarding_fee_base_msat = 239;
1928         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]);
1929         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1930         let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 190000, 1001, InitFeatures::known(), InitFeatures::known());
1931         let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 190000, 1001, InitFeatures::known(), InitFeatures::known());
1932
1933         let mut stat01 = get_channel_value_stat!(nodes[0], chan_1.2);
1934         let mut stat11 = get_channel_value_stat!(nodes[1], chan_1.2);
1935
1936         let mut stat12 = get_channel_value_stat!(nodes[1], chan_2.2);
1937         let mut stat22 = get_channel_value_stat!(nodes[2], chan_2.2);
1938
1939         macro_rules! expect_forward {
1940                 ($node: expr) => {{
1941                         let mut events = $node.node.get_and_clear_pending_msg_events();
1942                         assert_eq!(events.len(), 1);
1943                         check_added_monitors!($node, 1);
1944                         let payment_event = SendEvent::from_event(events.remove(0));
1945                         payment_event
1946                 }}
1947         }
1948
1949         let feemsat = 239; // set above
1950         let total_fee_msat = (nodes.len() - 2) as u64 * feemsat;
1951         let feerate = get_feerate!(nodes[0], chan_1.2);
1952
1953         let recv_value_0 = stat01.counterparty_max_htlc_value_in_flight_msat - total_fee_msat;
1954
1955         // attempt to send amt_msat > their_max_htlc_value_in_flight_msat
1956         {
1957                 let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_0);
1958                 route.paths[0].last_mut().unwrap().fee_msat += 1;
1959                 assert!(route.paths[0].iter().rev().skip(1).all(|h| h.fee_msat == feemsat));
1960                 unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err },
1961                         assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err)));
1962                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1963                 nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us over the max HTLC value in flight our peer will accept".to_string(), 1);
1964         }
1965
1966         // the channel reserve is bigger than their_max_htlc_value_in_flight_msat, so loop, sending
1967         // payments until nodes[0]'s spendable balance is (nearly) depleted
1968         loop {
1969                 let amt_msat = recv_value_0 + total_fee_msat;
1970                 // The 3 covers the 3 HTLCs that will be sent; the 2* and the extra +1 HTLC account for
1971                 // the fee spike reserve. Also, make sure each payment is over the dust limit so that it
1972                 // is included in each commit tx fee calculation.
1973                 let commit_tx_fee_all_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1);
1974                 let ensure_htlc_amounts_above_dust_buffer = 3 * (stat01.counterparty_dust_limit_msat + 1000);
1975                 if stat01.value_to_self_msat < stat01.channel_reserve_msat + commit_tx_fee_all_htlcs + ensure_htlc_amounts_above_dust_buffer + amt_msat {
1976                         break;
1977                 }
1978                 send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_0);
1979
1980                 let (stat01_, stat11_, stat12_, stat22_) = (
1981                         get_channel_value_stat!(nodes[0], chan_1.2),
1982                         get_channel_value_stat!(nodes[1], chan_1.2),
1983                         get_channel_value_stat!(nodes[1], chan_2.2),
1984                         get_channel_value_stat!(nodes[2], chan_2.2),
1985                 );
1986
1987                 assert_eq!(stat01_.value_to_self_msat, stat01.value_to_self_msat - amt_msat);
1988                 assert_eq!(stat11_.value_to_self_msat, stat11.value_to_self_msat + amt_msat);
1989                 assert_eq!(stat12_.value_to_self_msat, stat12.value_to_self_msat - (amt_msat - feemsat));
1990                 assert_eq!(stat22_.value_to_self_msat, stat22.value_to_self_msat + (amt_msat - feemsat));
1991                 stat01 = stat01_; stat11 = stat11_; stat12 = stat12_; stat22 = stat22_;
1992         }
1993
1994         // Now add a pending output.
1995         // The 2* and the extra +1 HTLC on the commit tx fee account for the fee spike reserve.
1996         // The reason we divide by two here is as follows: the dividend is the total outbound liquidity
1997         // left after fees, the channel reserve, and the fee spike buffer are removed. We eventually want
1998         // to split this quantity into 3 portions, each of which will be sent in an HTLC. This lets us
1999         // test channel reserve policy at the edges of what amount is sendable, i.e.
2000         // cases where 1 msat over X will cause a payment failure, but anything less than
2001         // that can be sent successfully. So, dividing by two is a somewhat arbitrary way of picking
2002         // the amount of the first of these 3 payments. The reason we split into 3 payments
2003         // is to test how the holding cell behaves with respect to channel reserve and commit tx fee
2004         // policy.
2005         let commit_tx_fee_2_htlcs = 2*commit_tx_fee_msat(feerate, 2 + 1);
2006         let recv_value_1 = (stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs)/2;
2007         let amt_msat_1 = recv_value_1 + total_fee_msat;
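             // Put differently (a sketch, not exact to the msat): the budget
             //   stat01.value_to_self_msat - channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs
             // is what nodes[0] can put into HTLCs without dipping into its reserve. recv_value_1 takes half
             // of it now, and recv_value_21/recv_value_22 below split what remains (minus the extra fee of a
             // third HTLC); the assert_eq! a few lines down checks that this lands exactly on the reserve
             // boundary.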
2008
2009         let (route_1, our_payment_hash_1, our_payment_preimage_1, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_1);
2010         let payment_event_1 = {
2011                 nodes[0].node.send_payment(&route_1, our_payment_hash_1, &Some(our_payment_secret_1)).unwrap();
2012                 check_added_monitors!(nodes[0], 1);
2013
2014                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2015                 assert_eq!(events.len(), 1);
2016                 SendEvent::from_event(events.remove(0))
2017         };
2018         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]);
2019
2020         // channel reserve test with htlc pending output > 0
2021         let recv_value_2 = stat01.value_to_self_msat - amt_msat_1 - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs;
2022         {
2023                 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_2 + 1);
2024                 unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err },
2025                         assert!(regex::Regex::new(r"Cannot send value that would put our balance under counterparty-announced channel reserve value \(\d+\)").unwrap().is_match(err)));
2026                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
2027         }
2028
2029         // split the rest to test holding cell
2030         let commit_tx_fee_3_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1);
2031         let additional_htlc_cost_msat = commit_tx_fee_3_htlcs - commit_tx_fee_2_htlcs;
2032         let recv_value_21 = recv_value_2/2 - additional_htlc_cost_msat/2;
2033         let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat - additional_htlc_cost_msat;
2034         {
2035                 let stat = get_channel_value_stat!(nodes[0], chan_1.2);
2036                 assert_eq!(stat.value_to_self_msat - (stat.pending_outbound_htlcs_amount_msat + recv_value_21 + recv_value_22 + total_fee_msat + total_fee_msat + commit_tx_fee_3_htlcs), stat.channel_reserve_msat);
2037         }
2038
2039         // now see if they go through on both sides
2040         let (route_21, our_payment_hash_21, our_payment_preimage_21, our_payment_secret_21) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_21);
2041         // but this one will get stuck in the holding cell
2042         nodes[0].node.send_payment(&route_21, our_payment_hash_21, &Some(our_payment_secret_21)).unwrap();
2043         check_added_monitors!(nodes[0], 0);
2044         let events = nodes[0].node.get_and_clear_pending_events();
2045         assert_eq!(events.len(), 0);
2046
2047         // test with outbound holding cell amount > 0
2048         {
2049                 let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22+1);
2050                 unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err },
2051                         assert!(regex::Regex::new(r"Cannot send value that would put our balance under counterparty-announced channel reserve value \(\d+\)").unwrap().is_match(err)));
2052                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
2053                 nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send value that would put our balance under counterparty-announced channel reserve value".to_string(), 2);
2054         }
2055
2056         let (route_22, our_payment_hash_22, our_payment_preimage_22, our_payment_secret_22) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22);
2057         // this one will also get stuck in the holding cell
2058         nodes[0].node.send_payment(&route_22, our_payment_hash_22, &Some(our_payment_secret_22)).unwrap();
2059         check_added_monitors!(nodes[0], 0);
2060         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
2061         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
2062
2063         // flush the pending htlc
2064         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event_1.commitment_msg);
2065         let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2066         check_added_monitors!(nodes[1], 1);
2067
2068         // the pending htlc should be promoted to committed
2069         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack);
2070         check_added_monitors!(nodes[0], 1);
2071         let commitment_update_2 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2072
2073         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_commitment_signed);
2074         let bs_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2075         // No commitment_signed so get_event_msg's assert(len == 1) passes
2076         check_added_monitors!(nodes[0], 1);
2077
2078         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_revoke_and_ack);
2079         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2080         check_added_monitors!(nodes[1], 1);
2081
2082         expect_pending_htlcs_forwardable!(nodes[1]);
2083
2084         let ref payment_event_11 = expect_forward!(nodes[1]);
2085         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_11.msgs[0]);
2086         commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false);
2087
2088         expect_pending_htlcs_forwardable!(nodes[2]);
2089         expect_payment_received!(nodes[2], our_payment_hash_1, our_payment_secret_1, recv_value_1);
2090
2091         // flush the htlcs in the holding cell
2092         assert_eq!(commitment_update_2.update_add_htlcs.len(), 2);
2093         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[0]);
2094         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[1]);
2095         commitment_signed_dance!(nodes[1], nodes[0], &commitment_update_2.commitment_signed, false);
2096         expect_pending_htlcs_forwardable!(nodes[1]);
2097
2098         let ref payment_event_3 = expect_forward!(nodes[1]);
2099         assert_eq!(payment_event_3.msgs.len(), 2);
2100         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[0]);
2101         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[1]);
2102
2103         commitment_signed_dance!(nodes[2], nodes[1], &payment_event_3.commitment_msg, false);
2104         expect_pending_htlcs_forwardable!(nodes[2]);
2105
2106         let events = nodes[2].node.get_and_clear_pending_events();
2107         assert_eq!(events.len(), 2);
2108         match events[0] {
2109                 Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
2110                         assert_eq!(our_payment_hash_21, *payment_hash);
2111                         assert_eq!(recv_value_21, amt);
2112                         match &purpose {
2113                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
2114                                         assert!(payment_preimage.is_none());
2115                                         assert_eq!(our_payment_secret_21, *payment_secret);
2116                                 },
2117                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
2118                         }
2119                 },
2120                 _ => panic!("Unexpected event"),
2121         }
2122         match events[1] {
2123                 Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
2124                         assert_eq!(our_payment_hash_22, *payment_hash);
2125                         assert_eq!(recv_value_22, amt);
2126                         match &purpose {
2127                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
2128                                         assert!(payment_preimage.is_none());
2129                                         assert_eq!(our_payment_secret_22, *payment_secret);
2130                                 },
2131                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
2132                         }
2133                 },
2134                 _ => panic!("Unexpected event"),
2135         }
2136
2137         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_1);
2138         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21);
2139         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22);
2140
2141         let commit_tx_fee_0_htlcs = 2*commit_tx_fee_msat(feerate, 1);
2142         let recv_value_3 = commit_tx_fee_2_htlcs - commit_tx_fee_0_htlcs - total_fee_msat;
2143         send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_3);
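             // The idea (a sketch): with all three HTLCs claimed, nodes[0]'s commitment fee dropped from
             // the 2-pending-HTLC level back to the 0-HTLC level, freeing commit_tx_fee_2_htlcs -
             // commit_tx_fee_0_htlcs msat; after the routing fee that is recv_value_3, and the asserts
             // below check that nodes[0] ends up sitting exactly at reserve + one-HTLC commit fee.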
2144
2145         let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1);
2146         let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat) - (recv_value_3 + total_fee_msat);
2147         let stat0 = get_channel_value_stat!(nodes[0], chan_1.2);
2148         assert_eq!(stat0.value_to_self_msat, expected_value_to_self);
2149         assert_eq!(stat0.value_to_self_msat, stat0.channel_reserve_msat + commit_tx_fee_1_htlc);
2150
2151         let stat2 = get_channel_value_stat!(nodes[2], chan_2.2);
2152         assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22 + recv_value_3);
2153 }
2154
2155 #[test]
2156 fn channel_reserve_in_flight_removes() {
2157         // In cases where one side claims an HTLC, it thinks it has additional available funds that it
2158         // can send to its counterparty, but due to update ordering, the other side may not yet have
2159         // considered those HTLCs fully removed.
2160         // This tests that we don't count HTLCs which will not be included in the next remote
2161         // commitment transaction towards the reserve value (as it implies no commitment transaction
2162         // will be generated which violates the remote reserve value).
2163         // This was broken previously, and discovered by the chanmon_fail_consistency fuzz test.
2164         // To test this we:
2165         //  * route two HTLCs from A to B (note that, at a high level, this test is checking that, when
2166         //    you consider the values of both of these HTLCs, B may not send an HTLC back to A, but if
2167 //    you only consider the value of the first HTLC, it may),
2168         //  * start routing a third HTLC from A to B,
2169         //  * claim the first two HTLCs (though B will generate an update_fulfill for one, and put
2170         //    the other claim in its holding cell, as it immediately goes into AwaitingRAA),
2171         //  * deliver the first fulfill from B
2172         //  * deliver the update_add and an RAA from A, resulting in B freeing the second holding cell
2173         //    claim,
2174         //  * deliver A's response CS and RAA.
2175         //    This results in A having the second HTLC in AwaitingRemovedRemoteRevoke, but B having
2176         //    removed it fully. B now has the push_msat plus the first two HTLCs in value.
2177         //  * Now B happily sends another HTLC, potentially violating its reserve value from A's point
2178         //    of view (if A counts the AwaitingRemovedRemoteRevoke HTLC).
2179         let chanmon_cfgs = create_chanmon_cfgs(2);
2180         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2181         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2182         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2183         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
2184         let logger = test_utils::TestLogger::new();
2185
2186         let b_chan_values = get_channel_value_stat!(nodes[1], chan_1.2);
2187         // Route the first two HTLCs.
2188         let (payment_preimage_1, _, _) = route_payment(&nodes[0], &[&nodes[1]], b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000);
2189         let (payment_preimage_2, _, _) = route_payment(&nodes[0], &[&nodes[1]], 20000);
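             // The sizing here is deliberate (a sketch of the intent): once B claims both HTLCs, its
             // balance sits only ~10_000 msat above the reserve it must maintain, so whether the
             // nearly-removed HTLCs are still being counted against it makes the difference between the
             // later B -> A payment being allowed or rejected.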
2190
2191         // Start routing the third HTLC (this is just used to get everyone in the right state).
2192         let (payment_preimage_3, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[1]);
2193         let send_1 = {
2194                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
2195                 let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
2196                 nodes[0].node.send_payment(&route, payment_hash_3, &Some(payment_secret_3)).unwrap();
2197                 check_added_monitors!(nodes[0], 1);
2198                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2199                 assert_eq!(events.len(), 1);
2200                 SendEvent::from_event(events.remove(0))
2201         };
2202
2203         // Now claim both of the first two HTLCs on B's end, putting B in AwaitingRAA and generating an
2204         // initial fulfill/CS.
2205         assert!(nodes[1].node.claim_funds(payment_preimage_1));
2206         check_added_monitors!(nodes[1], 1);
2207         let bs_removes = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2208
2209         // This claim goes in B's holding cell, allowing us to have a pending B->A RAA which does not
2210         // remove the second HTLC when we send the HTLC back from B to A.
2211         assert!(nodes[1].node.claim_funds(payment_preimage_2));
2212         check_added_monitors!(nodes[1], 1);
2213         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2214
2215         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_removes.update_fulfill_htlcs[0]);
2216         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed);
2217         check_added_monitors!(nodes[0], 1);
2218         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2219         expect_payment_sent!(nodes[0], payment_preimage_1);
2220
2221         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_1.msgs[0]);
2222         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_1.commitment_msg);
2223         check_added_monitors!(nodes[1], 1);
2224         // B is already AwaitingRAA, so it can't generate a CS here
2225         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2226
2227         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2228         check_added_monitors!(nodes[1], 1);
2229         let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2230
2231         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2232         check_added_monitors!(nodes[0], 1);
2233         let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2234
2235         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2236         check_added_monitors!(nodes[1], 1);
2237         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2238
2239         // The second HTLC is removed, but as A is in AwaitingRAA it can't generate a CS here, so the
2240         // RAA that B generated above doesn't fully resolve the second HTLC from A's point of view.
2241         // However, the RAA A generates here *does* fully resolve the HTLC from B's point of view (as A
2242         // can no longer broadcast a commitment transaction with it and B has the preimage so can go
2243         // on-chain as necessary).
2244         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_cs.update_fulfill_htlcs[0]);
2245         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
2246         check_added_monitors!(nodes[0], 1);
2247         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2248         expect_payment_sent!(nodes[0], payment_preimage_2);
2249
2250         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2251         check_added_monitors!(nodes[1], 1);
2252         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2253
2254         expect_pending_htlcs_forwardable!(nodes[1]);
2255         expect_payment_received!(nodes[1], payment_hash_3, payment_secret_3, 100000);
2256
2257         // Note that as this RAA was generated before the delivery of the update_fulfill it shouldn't
2258         // resolve the second HTLC from A's point of view.
2259         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2260         check_added_monitors!(nodes[0], 1);
2261         let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2262
2263         // Now that B doesn't have the second RAA anymore, but A still does, send a payment from B back
2264         // to A to ensure that A doesn't count the almost-removed HTLC in update_add processing.
2265         let (payment_preimage_4, payment_hash_4, payment_secret_4) = get_payment_preimage_hash!(nodes[0]);
2266         let send_2 = {
2267                 let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
2268                 let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 10000, TEST_FINAL_CLTV, &logger).unwrap();
2269                 nodes[1].node.send_payment(&route, payment_hash_4, &Some(payment_secret_4)).unwrap();
2270                 check_added_monitors!(nodes[1], 1);
2271                 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
2272                 assert_eq!(events.len(), 1);
2273                 SendEvent::from_event(events.remove(0))
2274         };
2275
2276         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_2.msgs[0]);
2277         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_2.commitment_msg);
2278         check_added_monitors!(nodes[0], 1);
2279         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2280
2281         // Now just resolve all the outstanding messages/HTLCs for completeness...
2282
2283         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2284         check_added_monitors!(nodes[1], 1);
2285         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2286
2287         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
2288         check_added_monitors!(nodes[1], 1);
2289
2290         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2291         check_added_monitors!(nodes[0], 1);
2292         let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2293
2294         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
2295         check_added_monitors!(nodes[1], 1);
2296         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2297
2298         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2299         check_added_monitors!(nodes[0], 1);
2300
2301         expect_pending_htlcs_forwardable!(nodes[0]);
2302         expect_payment_received!(nodes[0], payment_hash_4, payment_secret_4, 10000);
2303
2304         claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_4);
2305         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
2306 }
2307
2308 #[test]
2309 fn channel_monitor_network_test() {
2310         // Simple test which builds a network of ChannelManagers, connects them to each other, and
2311         // tests that ChannelMonitor is able to recover from various states.
2312         let chanmon_cfgs = create_chanmon_cfgs(5);
2313         let node_cfgs = create_node_cfgs(5, &chanmon_cfgs);
2314         let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &[None, None, None, None, None]);
2315         let nodes = create_network(5, &node_cfgs, &node_chanmgrs);
2316
2317         // Create some initial channels
2318         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
2319         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
2320         let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known());
2321         let chan_4 = create_announced_chan_between_nodes(&nodes, 3, 4, InitFeatures::known(), InitFeatures::known());
2322
2323         // Make sure all nodes are at the same starting height
2324         connect_blocks(&nodes[0], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
2325         connect_blocks(&nodes[1], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
2326         connect_blocks(&nodes[2], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
2327         connect_blocks(&nodes[3], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[3].best_block_info().1);
2328         connect_blocks(&nodes[4], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[4].best_block_info().1);
2329
2330         // Rebalance the network a bit by relaying one payment through all the channels...
2331         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2332         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2333         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2334         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
2335
2336         // Simple case with no pending HTLCs:
2337         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), true);
2338         check_added_monitors!(nodes[1], 1);
2339         check_closed_broadcast!(nodes[1], false);
2340         {
2341                 let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
2342                 assert_eq!(node_txn.len(), 1);
2343                 mine_transaction(&nodes[0], &node_txn[0]);
2344                 check_added_monitors!(nodes[0], 1);
2345                 test_txn_broadcast(&nodes[0], &chan_1, None, HTLCType::NONE);
2346         }
2347         check_closed_broadcast!(nodes[0], true);
2348         assert_eq!(nodes[0].node.list_channels().len(), 0);
2349         assert_eq!(nodes[1].node.list_channels().len(), 1);
2350
2351         // One pending HTLC is discarded by the force-close:
2352         let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 3000000).0;
2353
2354         // Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
2355         // broadcast until we reach the timelock height).
2356         nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), true);
2357         check_closed_broadcast!(nodes[1], false);
2358         check_added_monitors!(nodes[1], 1);
2359         {
2360                 let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::NONE);
2361                 connect_blocks(&nodes[1], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1);
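                     // (Roughly: the HTLC's absolute expiry sits TEST_FINAL_CLTV plus one hop's
                     // cltv_expiry_delta above the tip, and the extra LATENCY_GRACE_PERIOD_BLOCKS + 1 blocks
                     // get us past the point where the monitor considers the HTLC-Timeout broadcastable.)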
2362                 test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
2363                 mine_transaction(&nodes[2], &node_txn[0]);
2364                 check_added_monitors!(nodes[2], 1);
2365                 test_txn_broadcast(&nodes[2], &chan_2, None, HTLCType::NONE);
2366         }
2367         check_closed_broadcast!(nodes[2], true);
2368         assert_eq!(nodes[1].node.list_channels().len(), 0);
2369         assert_eq!(nodes[2].node.list_channels().len(), 1);
2370
2371         macro_rules! claim_funds {
2372                 ($node: expr, $prev_node: expr, $preimage: expr) => {
2373                         {
2374                                 assert!($node.node.claim_funds($preimage));
2375                                 check_added_monitors!($node, 1);
2376
2377                                 let events = $node.node.get_and_clear_pending_msg_events();
2378                                 assert_eq!(events.len(), 1);
2379                                 match events[0] {
2380                                         MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, .. } } => {
2381                                                 assert!(update_add_htlcs.is_empty());
2382                                                 assert!(update_fail_htlcs.is_empty());
2383                                                 assert_eq!(*node_id, $prev_node.node.get_our_node_id());
2384                                         },
2385                                         _ => panic!("Unexpected event"),
2386                                 };
2387                         }
2388                 }
2389         }
2390
2391         // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
2392         // HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
2393         nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), true);
2394         check_added_monitors!(nodes[2], 1);
2395         check_closed_broadcast!(nodes[2], false);
2396         let node2_commitment_txid;
2397         {
2398                 let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::NONE);
2399                 connect_blocks(&nodes[2], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1);
2400                 test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT);
2401                 node2_commitment_txid = node_txn[0].txid();
2402
2403                 // Claim the payment on nodes[3], giving it knowledge of the preimage
2404                 claim_funds!(nodes[3], nodes[2], payment_preimage_1);
2405                 mine_transaction(&nodes[3], &node_txn[0]);
2406                 check_added_monitors!(nodes[3], 1);
2407                 check_preimage_claim(&nodes[3], &node_txn);
2408         }
2409         check_closed_broadcast!(nodes[3], true);
2410         assert_eq!(nodes[2].node.list_channels().len(), 0);
2411         assert_eq!(nodes[3].node.list_channels().len(), 1);
2412
2413         // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
2414         // confusing us in the following tests.
2415         let chan_3_mon = nodes[3].chain_monitor.chain_monitor.monitors.write().unwrap().remove(&OutPoint { txid: chan_3.3.txid(), index: 0 }).unwrap();
2416
2417         // One pending HTLC to time out:
2418         let payment_preimage_2 = route_payment(&nodes[3], &vec!(&nodes[4])[..], 3000000).0;
2419         // CLTV expires at TEST_FINAL_CLTV + 1 (current height) + 1 (added in send_payment for
2420         // buffer space).
2421
2422         let (close_chan_update_1, close_chan_update_2) = {
2423                 connect_blocks(&nodes[3], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
2424                 let events = nodes[3].node.get_and_clear_pending_msg_events();
2425                 assert_eq!(events.len(), 2);
2426                 let close_chan_update_1 = match events[0] {
2427                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
2428                                 msg.clone()
2429                         },
2430                         _ => panic!("Unexpected event"),
2431                 };
2432                 match events[1] {
2433                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id } => {
2434                                 assert_eq!(node_id, nodes[4].node.get_our_node_id());
2435                         },
2436                         _ => panic!("Unexpected event"),
2437                 }
2438                 check_added_monitors!(nodes[3], 1);
2439
2440                 // Clear the bumped claiming txn spending node 2's commitment tx. Bumped txn are generated once a height-based timer elapses.
2441                 {
2442                         let mut node_txn = nodes[3].tx_broadcaster.txn_broadcasted.lock().unwrap();
2443                         node_txn.retain(|tx| {
2444                                 if tx.input[0].previous_output.txid == node2_commitment_txid {
2445                                         false
2446                                 } else { true }
2447                         });
2448                 }
2449
2450                 let node_txn = test_txn_broadcast(&nodes[3], &chan_4, None, HTLCType::TIMEOUT);
2451
2452                 // Claim the payment on nodes[4], giving it knowledge of the preimage
2453                 claim_funds!(nodes[4], nodes[3], payment_preimage_2);
2454
2455                 connect_blocks(&nodes[4], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2);
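                     // (A sketch: this brings nodes[4] within CLTV_CLAIM_BUFFER blocks of the HTLC's expiry,
                     // the point at which it stops waiting for an off-chain fulfill to be delivered and
                     // force-closes so it can claim the HTLC on-chain with the preimage, hence the
                     // broadcast + error events below.)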
2456                 let events = nodes[4].node.get_and_clear_pending_msg_events();
2457                 assert_eq!(events.len(), 2);
2458                 let close_chan_update_2 = match events[0] {
2459                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
2460                                 msg.clone()
2461                         },
2462                         _ => panic!("Unexpected event"),
2463                 };
2464                 match events[1] {
2465                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id } => {
2466                                 assert_eq!(node_id, nodes[3].node.get_our_node_id());
2467                         },
2468                         _ => panic!("Unexpected event"),
2469                 }
2470                 check_added_monitors!(nodes[4], 1);
2471                 test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
2472
2473                 mine_transaction(&nodes[4], &node_txn[0]);
2474                 check_preimage_claim(&nodes[4], &node_txn);
2475                 (close_chan_update_1, close_chan_update_2)
2476         };
2477         nodes[3].net_graph_msg_handler.handle_channel_update(&close_chan_update_2).unwrap();
2478         nodes[4].net_graph_msg_handler.handle_channel_update(&close_chan_update_1).unwrap();
2479         assert_eq!(nodes[3].node.list_channels().len(), 0);
2480         assert_eq!(nodes[4].node.list_channels().len(), 0);
2481
2482         nodes[3].chain_monitor.chain_monitor.monitors.write().unwrap().insert(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon);
2483 }
2484
2485 #[test]
2486 fn test_justice_tx() {
2487         // Test justice txn built on revoked HTLC-Timeout and HTLC-Success txs, against both sides
2488         let mut alice_config = UserConfig::default();
2489         alice_config.channel_options.announced_channel = true;
2490         alice_config.peer_channel_config_limits.force_announced_channel_preference = false;
2491         alice_config.own_channel_config.our_to_self_delay = 6 * 24 * 5;
2492         let mut bob_config = UserConfig::default();
2493         bob_config.channel_options.announced_channel = true;
2494         bob_config.peer_channel_config_limits.force_announced_channel_preference = false;
2495         bob_config.own_channel_config.our_to_self_delay = 6 * 24 * 3;
2496         let user_cfgs = [Some(alice_config), Some(bob_config)];
2497         let mut chanmon_cfgs = create_chanmon_cfgs(2);
2498         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2499         chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
2500         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2501         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
2502         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2503         // Create some new channels:
2504         let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
2505
2506         // A pending HTLC which will be revoked:
2507         let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
2508         // Get the will-be-revoked local txn from nodes[0]
2509         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_5.2);
2510         assert_eq!(revoked_local_txn.len(), 2); // First commitment tx, then HTLC tx
2511         assert_eq!(revoked_local_txn[0].input.len(), 1);
2512         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_5.3.txid());
2513         assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to 0 are present
2514         assert_eq!(revoked_local_txn[1].input.len(), 1);
2515         assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
2516         assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout
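             // (The last witness element is the HTLC's redeem script, and offered-HTLC scripts have a
             // fixed, known length, so matching OFFERED_HTLC_SCRIPT_WEIGHT is a cheap way of recognizing
             // an HTLC-Timeout spend in these tests.)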
2517         // Revoke the old state
2518         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
2519
2520         {
2521                 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2522                 {
2523                         let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
2524                         assert_eq!(node_txn.len(), 2); // ChannelMonitor: penalty tx, ChannelManager: local commitment tx
2525                         assert_eq!(node_txn[0].input.len(), 2); // We should claim the revoked output and the HTLC output
2526
2527                         check_spends!(node_txn[0], revoked_local_txn[0]);
2528                         node_txn.swap_remove(0);
2529                         node_txn.truncate(1);
2530                 }
2531                 check_added_monitors!(nodes[1], 1);
2532                 test_txn_broadcast(&nodes[1], &chan_5, None, HTLCType::NONE);
2533
2534                 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2535                 connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
2536                 // Verify broadcast of revoked HTLC-timeout
2537                 let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
2538                 check_added_monitors!(nodes[0], 1);
2539                 // Broadcast revoked HTLC-timeout on node 1
2540                 mine_transaction(&nodes[1], &node_txn[1]);
2541                 test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone(), revoked_local_txn[0].clone());
2542         }
2543         get_announce_close_broadcast_events(&nodes, 0, 1);
2544
2545         assert_eq!(nodes[0].node.list_channels().len(), 0);
2546         assert_eq!(nodes[1].node.list_channels().len(), 0);
2547
2548         // We test the justice tx built by A on B's revoked HTLC-Success tx
2549         // Create some new channels:
2550         let chan_6 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
2551         {
2552                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
2553                 node_txn.clear();
2554         }
2555
2556         // A pending HTLC which will be revoked:
2557         let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
2558         // Get the will-be-revoked local txn from B
2559         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_6.2);
2560         assert_eq!(revoked_local_txn.len(), 1); // Only commitment tx
2561         assert_eq!(revoked_local_txn[0].input.len(), 1);
2562         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_6.3.txid());
2563         assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to A are present
2564         // Revoke the old state
2565         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_4);
2566         {
2567                 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2568                 {
2569                         let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
2570                         assert_eq!(node_txn.len(), 2); // ChannelMonitor: penalty tx, ChannelManager: local commitment tx
2571                         assert_eq!(node_txn[0].input.len(), 1); // We claim the received HTLC output
2572
2573                         check_spends!(node_txn[0], revoked_local_txn[0]);
2574                         node_txn.swap_remove(0);
2575                 }
2576                 check_added_monitors!(nodes[0], 1);
2577                 test_txn_broadcast(&nodes[0], &chan_6, None, HTLCType::NONE);
2578
2579                 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2580                 let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
2581                 check_added_monitors!(nodes[1], 1);
2582                 mine_transaction(&nodes[0], &node_txn[1]);
2583                 test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone());
2584         }
2585         get_announce_close_broadcast_events(&nodes, 0, 1);
2586         assert_eq!(nodes[0].node.list_channels().len(), 0);
2587         assert_eq!(nodes[1].node.list_channels().len(), 0);
2588 }
2589
2590 #[test]
2591 fn revoked_output_claim() {
2592         // Simple test to ensure a node will claim a revoked output when a stale remote commitment
2593         // transaction is broadcast by its counterparty
2594         let chanmon_cfgs = create_chanmon_cfgs(2);
2595         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2596         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2597         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2598         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
2599         // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim the revoked output
2600         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2601         assert_eq!(revoked_local_txn.len(), 1);
2602         // Only output is the full channel value back to nodes[0]:
2603         assert_eq!(revoked_local_txn[0].output.len(), 1);
2604         // Send a payment through, updating everyone's latest commitment txn
2605         send_payment(&nodes[0], &vec!(&nodes[1])[..], 5000000);
2606
2607         // Inform nodes[1] that nodes[0] broadcast a stale tx
2608         mine_transaction(&nodes[1], &revoked_local_txn[0]);
2609         check_added_monitors!(nodes[1], 1);
2610         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
2611         assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx against revoked to_local output, ChannelManager: local commitment tx
2612
2613         check_spends!(node_txn[0], revoked_local_txn[0]);
2614         check_spends!(node_txn[1], chan_1.3);
2615
2616         // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan
2617         mine_transaction(&nodes[0], &revoked_local_txn[0]);
2618         get_announce_close_broadcast_events(&nodes, 0, 1);
2619         check_added_monitors!(nodes[0], 1)
2620 }
2621
2622 #[test]
2623 fn claim_htlc_outputs_shared_tx() {
2624         // Node revoked old state, HTLCs haven't timed out yet, claim them in a shared justice tx
2625         let mut chanmon_cfgs = create_chanmon_cfgs(2);
2626         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2627         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2628         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2629         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2630
2631         // Create some new channel:
2632         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
2633
2634         // Rebalance the network to generate HTLCs in both directions
2635         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
2636         // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim both offered/received HTLC outputs on top of the commitment tx
2637         let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
2638         let (_payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000);
2639
2640         // Get the will-be-revoked local txn from node[0]
2641         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2642         assert_eq!(revoked_local_txn.len(), 2); // commitment tx + 1 HTLC-Timeout tx
2643         assert_eq!(revoked_local_txn[0].input.len(), 1);
2644         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
2645         assert_eq!(revoked_local_txn[1].input.len(), 1);
2646         assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
2647         assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout
2648         check_spends!(revoked_local_txn[1], revoked_local_txn[0]);
2649
2650         // Revoke the old state
2651         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
2652
2653         {
2654                 mine_transaction(&nodes[0], &revoked_local_txn[0]);
2655                 check_added_monitors!(nodes[0], 1);
2656                 mine_transaction(&nodes[1], &revoked_local_txn[0]);
2657                 check_added_monitors!(nodes[1], 1);
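                     // ANTI_REORG_DELAY total confirmations of the revoked commitment are required before the pending HTLC is considered irrevocably lost and failed back to the sender.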
2658                 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2659                 expect_payment_failed!(nodes[1], payment_hash_2, true);
2660
2661                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
2662                 assert_eq!(node_txn.len(), 2); // ChannelMonitor: penalty tx, ChannelManager: local commitment
2663
2664                 assert_eq!(node_txn[0].input.len(), 3); // Claim the revoked output + both revoked HTLC outputs
2665                 check_spends!(node_txn[0], revoked_local_txn[0]);
2666
2667                 let mut witness_lens = BTreeSet::new();
2668                 witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
2669                 witness_lens.insert(node_txn[0].input[1].witness.last().unwrap().len());
2670                 witness_lens.insert(node_txn[0].input[2].witness.last().unwrap().len());
2671                 assert_eq!(witness_lens.len(), 3);
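                     // BTreeSet iterates in ascending order, so the witness-script lengths are checked from smallest (revoked to_local) to largest (revoked received HTLC).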
2672                 assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
2673                 assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC
2674                 assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC
2675
2676                 // Next nodes[1] broadcasts its current local tx state:
2677                 assert_eq!(node_txn[1].input.len(), 1);
2678                 assert_eq!(node_txn[1].input[0].previous_output.txid, chan_1.3.txid()); // Spending the funding tx's unique output; tx broadcast by ChannelManager
2679         }
2680         get_announce_close_broadcast_events(&nodes, 0, 1);
2681         assert_eq!(nodes[0].node.list_channels().len(), 0);
2682         assert_eq!(nodes[1].node.list_channels().len(), 0);
2683 }
2684
2685 #[test]
2686 fn claim_htlc_outputs_single_tx() {
2687         // Node revoked old state, HTLCs have timed out, claim each of them in a separate justice tx
2688         let mut chanmon_cfgs = create_chanmon_cfgs(2);
2689         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
2690         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2691         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2692         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2693
2694         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
2695
2696         // Rebalance the network to generate HTLCs in both directions
2697         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
2698         // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim both offered/received HTLC outputs on top of the commitment tx, but this
2699         // time as two different claim transactions, as we're going to time out the HTLCs given a high current height
2700         let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
2701         let (_payment_preimage_2, payment_hash_2, _payment_secret_2) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000);
2702
2703         // Get the will-be-revoked local txn from node[0]
2704         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
2705
2706         // Revoke the old state
2707         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
2708
2709         {
2710                 confirm_transaction_at(&nodes[0], &revoked_local_txn[0], 100);
2711                 check_added_monitors!(nodes[0], 1);
2712                 confirm_transaction_at(&nodes[1], &revoked_local_txn[0], 100);
2713                 check_added_monitors!(nodes[1], 1);
2714                 expect_pending_htlcs_forwardable_ignore!(nodes[0]);
2715
2716                 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2717                 expect_payment_failed!(nodes[1], payment_hash_2, true);
2718
2719                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
2720                 assert_eq!(node_txn.len(), 9);
2721                 // ChannelMonitor: justice tx revoked offered htlc, justice tx revoked received htlc, justice tx revoked to_local (3)
2722                 // ChannelManager: local commitment + local HTLC-timeout (2)
2723                 // ChannelMonitor: bumped justice tx; after one increase, further bumps on the HTLCs aren't generated as they're no longer substantial, and a bump on the revoked to_local isn't generated as it has more room before expiration (2)
2724                 // ChannelMonitor: local commitment + local HTLC-timeout (2)
2725
2726                 // Check the local commitment / HTLC-timeout pair broadcast due to HTLC expiration
2727                 assert_eq!(node_txn[0].input.len(), 1);
2728                 check_spends!(node_txn[0], chan_1.3);
2729                 assert_eq!(node_txn[1].input.len(), 1);
2730                 let witness_script = node_txn[1].input[0].witness.last().unwrap();
2731                 assert_eq!(witness_script.len(), OFFERED_HTLC_SCRIPT_WEIGHT); // Spending an offered HTLC output
2732                 check_spends!(node_txn[1], node_txn[0]);
2733
2734                 // Justice transactions are at indices 2-3-4
2735                 assert_eq!(node_txn[2].input.len(), 1);
2736                 assert_eq!(node_txn[3].input.len(), 1);
2737                 assert_eq!(node_txn[4].input.len(), 1);
2738
2739                 check_spends!(node_txn[2], revoked_local_txn[0]);
2740                 check_spends!(node_txn[3], revoked_local_txn[0]);
2741                 check_spends!(node_txn[4], revoked_local_txn[0]);
2742
2743                 let mut witness_lens = BTreeSet::new();
2744                 witness_lens.insert(node_txn[2].input[0].witness.last().unwrap().len());
2745                 witness_lens.insert(node_txn[3].input[0].witness.last().unwrap().len());
2746                 witness_lens.insert(node_txn[4].input[0].witness.last().unwrap().len());
2747                 assert_eq!(witness_lens.len(), 3);
2748                 assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
2749                 assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC
2750                 assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC
2751         }
2752         get_announce_close_broadcast_events(&nodes, 0, 1);
2753         assert_eq!(nodes[0].node.list_channels().len(), 0);
2754         assert_eq!(nodes[1].node.list_channels().len(), 0);
2755 }
2756
2757 #[test]
2758 fn test_htlc_on_chain_success() {
2759         // Test that in case of a unilateral close onchain, we detect the state of the output and pass
2760         // the preimage backward accordingly. So here we test that ChannelManager is
2761         // broadcasting the right event to the other nodes in the payment path.
2762         // We test with two HTLCs simultaneously as that was not handled correctly in the past.
2763         // A --------------------> B ----------------------> C (preimage)
2764         // First, C should claim the HTLC outputs via HTLC-Success when its own latest local
2765         // commitment transaction was broadcast.
2766         // Then, B should learn the preimage from said transactions, attempting to claim backwards
2767         // towards A.
2768         // B should be able to claim via preimage if A then broadcasts its local tx.
2769         // Finally, when A sees B's latest local commitment transaction it should be able to claim
2770         // the HTLC outputs via the preimage it learned (which, once confirmed should generate a
2771         // PaymentSent event).
2772
2773         let chanmon_cfgs = create_chanmon_cfgs(3);
2774         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2775         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2776         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2777
2778         // Create some initial channels
2779         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
2780         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
2781
2782         // Ensure all nodes are at the same height
2783         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
2784         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
2785         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
2786         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
2787
2788         // Rebalance the network a bit by relaying one payment through all the channels...
2789         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
2790         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
2791
2792         let (our_payment_preimage, _payment_hash, _payment_secret) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
2793         let (our_payment_preimage_2, _payment_hash_2, _payment_secret_2) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
2794
2795         // Broadcast legit commitment tx from C on B's chain
2796         // Broadcast HTLC Success transaction by C on received output from C's commitment tx on B's chain
2797         let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
2798         assert_eq!(commitment_tx.len(), 1);
2799         check_spends!(commitment_tx[0], chan_2.3);
2800         nodes[2].node.claim_funds(our_payment_preimage);
2801         nodes[2].node.claim_funds(our_payment_preimage_2);
2802         check_added_monitors!(nodes[2], 2);
2803         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2804         assert!(updates.update_add_htlcs.is_empty());
2805         assert!(updates.update_fail_htlcs.is_empty());
2806         assert!(updates.update_fail_malformed_htlcs.is_empty());
2807         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
2808
2809         mine_transaction(&nodes[2], &commitment_tx[0]);
2810         check_closed_broadcast!(nodes[2], true);
2811         check_added_monitors!(nodes[2], 1);
2812         let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 3 (commitment tx, 2*htlc-success tx), ChannelMonitor : 2 (2 * HTLC-Success tx)
2813         assert_eq!(node_txn.len(), 5);
2814         assert_eq!(node_txn[0], node_txn[3]);
2815         assert_eq!(node_txn[1], node_txn[4]);
2816         assert_eq!(node_txn[2], commitment_tx[0]);
2817         check_spends!(node_txn[0], commitment_tx[0]);
2818         check_spends!(node_txn[1], commitment_tx[0]);
2819         assert_eq!(node_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2820         assert_eq!(node_txn[1].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2821         assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2822         assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
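             // Unlike HTLC-Timeout transactions, which are locked to the HTLC's cltv_expiry, HTLC-Success transactions carry no locktime.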
2823         assert_eq!(node_txn[0].lock_time, 0);
2824         assert_eq!(node_txn[1].lock_time, 0);
2825
2826         // Verify that B's ChannelManager is able to extract the preimage from the HTLC-Success tx and pass it backward
2827         let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
2828         connect_block(&nodes[1], &Block { header, txdata: node_txn});
2829         connect_blocks(&nodes[1], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
2830         {
2831                 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
2832                 assert_eq!(added_monitors.len(), 1);
2833                 assert_eq!(added_monitors[0].0.txid, chan_2.3.txid());
2834                 added_monitors.clear();
2835         }
2836         let events = nodes[1].node.get_and_clear_pending_msg_events();
2837         {
2838                 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
2839                 assert_eq!(added_monitors.len(), 2);
2840                 assert_eq!(added_monitors[0].0.txid, chan_1.3.txid());
2841                 assert_eq!(added_monitors[1].0.txid, chan_1.3.txid());
2842                 added_monitors.clear();
2843         }
2844         assert_eq!(events.len(), 3);
2845         match events[0] {
2846                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
2847                 _ => panic!("Unexpected event"),
2848         }
2849         match events[1] {
2850                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id: _ } => {},
2851                 _ => panic!("Unexpected event"),
2852         }
2853
2854         match events[2] {
2855                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
2856                         assert!(update_add_htlcs.is_empty());
2857                         assert!(update_fail_htlcs.is_empty());
2858                         assert_eq!(update_fulfill_htlcs.len(), 1);
2859                         assert!(update_fail_malformed_htlcs.is_empty());
2860                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
2861                 },
2862                 _ => panic!("Unexpected event"),
2863         };
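             // Checks that $node broadcast its own commitment transaction (spending $chan_tx) along with two HTLC-claiming transactions spending $commitment_tx.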
2864         macro_rules! check_tx_local_broadcast {
2865                 ($node: expr, $htlc_offered: expr, $commitment_tx: expr, $chan_tx: expr) => { {
2866                         let mut node_txn = $node.tx_broadcaster.txn_broadcasted.lock().unwrap();
2867                         assert_eq!(node_txn.len(), 3);
2868                         // Node[1]: ChannelManager: 3 (commitment tx, 2*HTLC-Timeout tx), ChannelMonitor: 2 (timeout tx)
2869                         // Node[0]: ChannelManager: 3 (commitment tx, 2*HTLC-Timeout tx), ChannelMonitor: 2 HTLC-timeout
2870                         check_spends!(node_txn[1], $commitment_tx);
2871                         check_spends!(node_txn[2], $commitment_tx);
2872                         assert_ne!(node_txn[1].lock_time, 0);
2873                         assert_ne!(node_txn[2].lock_time, 0);
2874                         if $htlc_offered {
2875                                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2876                                 assert_eq!(node_txn[2].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2877                                 assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2878                                 assert!(node_txn[2].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
2879                         } else {
2880                                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2881                                 assert_eq!(node_txn[2].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
2882                                 assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
2883                                 assert!(node_txn[2].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
2884                         }
2885                         check_spends!(node_txn[0], $chan_tx);
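                             // 71 bytes is the length of the funding output's 2-of-2 multisig witness script.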
2886                         assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), 71);
2887                         node_txn.clear();
2888                 } }
2889         }
2890         // nodes[1] now broadcasts its own local state as a fallback, suggesting an alternate
2891         // commitment transaction with corresponding HTLC-Timeout transactions, as well as a
2892         // timeout-claim of the output that nodes[2] just claimed via success.
2893         check_tx_local_broadcast!(nodes[1], false, commitment_tx[0], chan_2.3);
2894
2895         // Broadcast legit commitment tx from A on B's chain
2896         // Broadcast preimage tx by B on offered output from A's commitment tx on A's chain
2897         let node_a_commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
2898         check_spends!(node_a_commitment_tx[0], chan_1.3);
2899         mine_transaction(&nodes[1], &node_a_commitment_tx[0]);
2900         check_closed_broadcast!(nodes[1], true);
2901         check_added_monitors!(nodes[1], 1);
2902         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
2903         assert_eq!(node_txn.len(), 6); // ChannelManager : 3 (commitment tx + HTLC-Success * 2), ChannelMonitor : 3 (HTLC-Success, 2* RBF bumps of above HTLC txn)
2904         let commitment_spend =
2905                 if node_txn[0].input[0].previous_output.txid == node_a_commitment_tx[0].txid() {
2906                         check_spends!(node_txn[1], commitment_tx[0]);
2907                         check_spends!(node_txn[2], commitment_tx[0]);
2908                         assert_ne!(node_txn[1].input[0].previous_output.vout, node_txn[2].input[0].previous_output.vout);
2909                         &node_txn[0]
2910                 } else {
2911                         check_spends!(node_txn[0], commitment_tx[0]);
2912                         check_spends!(node_txn[1], commitment_tx[0]);
2913                         assert_ne!(node_txn[0].input[0].previous_output.vout, node_txn[1].input[0].previous_output.vout);
2914                         &node_txn[2]
2915                 };
2916
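             // B claims both HTLC outputs of A's commitment transaction with a single preimage-claim transaction.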
2917         check_spends!(commitment_spend, node_a_commitment_tx[0]);
2918         assert_eq!(commitment_spend.input.len(), 2);
2919         assert_eq!(commitment_spend.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2920         assert_eq!(commitment_spend.input[1].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
2921         assert_eq!(commitment_spend.lock_time, 0);
2922         assert!(commitment_spend.output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
2923         check_spends!(node_txn[3], chan_1.3);
2924         assert_eq!(node_txn[3].input[0].witness.clone().last().unwrap().len(), 71);
2925         check_spends!(node_txn[4], node_txn[3]);
2926         check_spends!(node_txn[5], node_txn[3]);
2927         // We don't bother to check that B can claim the HTLC output on its commitment tx here as
2928         // we already checked the same situation with A.
2929
2930         // Verify that A's ChannelManager is able to extract the preimage from the preimage tx and generate PaymentSent
2931         let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
2932         connect_block(&nodes[0], &Block { header, txdata: vec![node_a_commitment_tx[0].clone(), commitment_spend.clone()] });
2933         connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32 - 1); // Confirm blocks until the HTLC expires
2934         check_closed_broadcast!(nodes[0], true);
2935         check_added_monitors!(nodes[0], 1);
2936         let events = nodes[0].node.get_and_clear_pending_events();
2937         assert_eq!(events.len(), 2);
2938         let mut first_claimed = false;
2939         for event in events {
2940                 match event {
2941                         Event::PaymentSent { payment_preimage } => {
2942                                 if payment_preimage == our_payment_preimage {
2943                                         assert!(!first_claimed);
2944                                         first_claimed = true;
2945                                 } else {
2946                                         assert_eq!(payment_preimage, our_payment_preimage_2);
2947                                 }
2948                         },
2949                         _ => panic!("Unexpected event"),
2950                 }
2951         }
2952         check_tx_local_broadcast!(nodes[0], true, node_a_commitment_tx[0], chan_1.3);
2953 }
2954
2955 fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
2956         // Test that in case of a unilateral close onchain, we detect the state of the output and
2957         // time out the HTLC backwards accordingly. So here we test that ChannelManager is
2958         // broadcasting the right event to the other nodes in the payment path.
2959         // A ------------------> B ----------------------> C (timeout)
2960         //    B's commitment tx                 C's commitment tx
2961         //            \                                  \
2962         //         B's HTLC timeout tx               B's timeout tx
2963
2964         let chanmon_cfgs = create_chanmon_cfgs(3);
2965         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2966         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2967         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2968         *nodes[0].connect_style.borrow_mut() = connect_style;
2969         *nodes[1].connect_style.borrow_mut() = connect_style;
2970         *nodes[2].connect_style.borrow_mut() = connect_style;
2971
2972         // Create some initial channels
2973         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
2974         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
2975
2976         // Rebalance the network a bit by relaying one payment through all the channels...
2977         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
2978         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
2979
2980         let (_payment_preimage, payment_hash, _payment_secret) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
2981
2982         // Broadcast legit commitment tx from C on B's chain
2983         let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
2984         check_spends!(commitment_tx[0], chan_2.3);
2985         nodes[2].node.fail_htlc_backwards(&payment_hash);
2986         check_added_monitors!(nodes[2], 0);
2987         expect_pending_htlcs_forwardable!(nodes[2]);
2988         check_added_monitors!(nodes[2], 1);
2989
2990         let events = nodes[2].node.get_and_clear_pending_msg_events();
2991         assert_eq!(events.len(), 1);
2992         match events[0] {
2993                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
2994                         assert!(update_add_htlcs.is_empty());
2995                         assert!(!update_fail_htlcs.is_empty());
2996                         assert!(update_fulfill_htlcs.is_empty());
2997                         assert!(update_fail_malformed_htlcs.is_empty());
2998                         assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
2999                 },
3000                 _ => panic!("Unexpected event"),
3001         };
3002         mine_transaction(&nodes[2], &commitment_tx[0]);
3003         check_closed_broadcast!(nodes[2], true);
3004         check_added_monitors!(nodes[2], 1);
3005         let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx)
3006         assert_eq!(node_txn.len(), 1);
3007         check_spends!(node_txn[0], chan_2.3);
3008         assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), 71);
3009
3010         // Broadcast timeout transaction by B on received output from C's commitment tx on B's chain
3011         // Verify that B's ChannelManager is able to detect that the HTLC has timed out via its own tx and react backwards accordingly
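             // Advance B past the HTLC's CLTV expiry so that, once C's commitment tx confirms, B broadcasts its timeout claim against it.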
3012         connect_blocks(&nodes[1], 200 - nodes[2].best_block_info().1);
3013         mine_transaction(&nodes[1], &commitment_tx[0]);
3014         let timeout_tx;
3015         {
3016                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
3017                 assert_eq!(node_txn.len(), 5); // ChannelManager : 2 (commitment tx, HTLC-Timeout tx), ChannelMonitor : 2 (local commitment tx + HTLC-timeout), 1 timeout tx
3018                 assert_eq!(node_txn[0], node_txn[3]);
3019                 assert_eq!(node_txn[1], node_txn[4]);
3020
3021                 check_spends!(node_txn[2], commitment_tx[0]);
3022                 assert_eq!(node_txn[2].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
3023
3024                 check_spends!(node_txn[0], chan_2.3);
3025                 check_spends!(node_txn[1], node_txn[0]);
3026                 assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), 71);
3027                 assert_eq!(node_txn[1].clone().input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
3028
3029                 timeout_tx = node_txn[2].clone();
3030                 node_txn.clear();
3031         }
3032
3033         mine_transaction(&nodes[1], &timeout_tx);
3034         check_added_monitors!(nodes[1], 1);
3035         check_closed_broadcast!(nodes[1], true);
3036         {
3037                 // B will rebroadcast a fee-bumped timeout transaction here.
3038                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
3039                 assert_eq!(node_txn.len(), 1);
3040                 check_spends!(node_txn[0], commitment_tx[0]);
3041         }
3042
3043         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3044         {
3045                 // B may rebroadcast its own holder commitment transaction here, as a safeguard against
3046                 // some incredibly unlikely partial-eclipse-attack scenarios. That said, because the
3047                 // original commitment_tx[0] (also spending chan_2.3) has reached ANTI_REORG_DELAY B really
3048                 // shouldn't broadcast anything here, and in some connect style scenarios we do not.
3049                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
3050                 if node_txn.len() == 1 {
3051                         check_spends!(node_txn[0], chan_2.3);
3052                 } else {
3053                         assert_eq!(node_txn.len(), 0);
3054                 }
3055         }
3056
3057         expect_pending_htlcs_forwardable!(nodes[1]);
3058         check_added_monitors!(nodes[1], 1);
3059         let events = nodes[1].node.get_and_clear_pending_msg_events();
3060         assert_eq!(events.len(), 1);
3061         match events[0] {
3062                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
3063                         assert!(update_add_htlcs.is_empty());
3064                         assert!(!update_fail_htlcs.is_empty());
3065                         assert!(update_fulfill_htlcs.is_empty());
3066                         assert!(update_fail_malformed_htlcs.is_empty());
3067                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3068                 },
3069                 _ => panic!("Unexpected event"),
3070         };
3071
3072         // Broadcast legit commitment tx from B on A's chain
3073         let commitment_tx = get_local_commitment_txn!(nodes[1], chan_1.2);
3074         check_spends!(commitment_tx[0], chan_1.3);
3075
3076         mine_transaction(&nodes[0], &commitment_tx[0]);
3077         connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32 - 1); // Confirm blocks until the HTLC expires
3078
3079         check_closed_broadcast!(nodes[0], true);
3080         check_added_monitors!(nodes[0], 1);
3081         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 commitment tx, ChannelMonitor : 1 timeout tx
3082         assert_eq!(node_txn.len(), 2);
3083         check_spends!(node_txn[0], chan_1.3);
3084         assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), 71);
3085         check_spends!(node_txn[1], commitment_tx[0]);
3086         assert_eq!(node_txn[1].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
3087 }
3088
3089 #[test]
3090 fn test_htlc_on_chain_timeout() {
3091         do_test_htlc_on_chain_timeout(ConnectStyle::BestBlockFirstSkippingBlocks);
3092         do_test_htlc_on_chain_timeout(ConnectStyle::TransactionsFirstSkippingBlocks);
3093         do_test_htlc_on_chain_timeout(ConnectStyle::FullBlockViaListen);
3094 }
3095
3096 #[test]
3097 fn test_simple_commitment_revoked_fail_backward() {
3098         // Test that in case of a revoked commitment tx, we detect the resolution of the output by the justice tx
3099         // and fail backward accordingly.
3100
3101         let chanmon_cfgs = create_chanmon_cfgs(3);
3102         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3103         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3104         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3105
3106         // Create some initial channels
3107         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
3108         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
3109
3110         let (payment_preimage, _payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
3111         // Get the will-be-revoked local txn from nodes[2]
3112         let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
3113         // Revoke the old state
3114         claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
3115
3116         let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
3117
3118         mine_transaction(&nodes[1], &revoked_local_txn[0]);
3119         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3120         check_added_monitors!(nodes[1], 1);
3121         check_closed_broadcast!(nodes[1], true);
3122
3123         expect_pending_htlcs_forwardable!(nodes[1]);
3124         check_added_monitors!(nodes[1], 1);
3125         let events = nodes[1].node.get_and_clear_pending_msg_events();
3126         assert_eq!(events.len(), 1);
3127         match events[0] {
3128                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
3129                         assert!(update_add_htlcs.is_empty());
3130                         assert_eq!(update_fail_htlcs.len(), 1);
3131                         assert!(update_fulfill_htlcs.is_empty());
3132                         assert!(update_fail_malformed_htlcs.is_empty());
3133                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3134
3135                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
3136                         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
3137                         expect_payment_failure_chan_update!(nodes[0], chan_2.0.contents.short_channel_id, true);
3138                         expect_payment_failed!(nodes[0], payment_hash, false);
3139                 },
3140                 _ => panic!("Unexpected event"),
3141         }
3142 }
3143
3144 fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use_dust: bool, no_to_remote: bool) {
3145         // Test that if our counterparty broadcasts a revoked commitment transaction we fail all
3146         // pending HTLCs on that channel backwards even if the HTLCs aren't present in our latest
3147         // commitment transaction anymore.
3148         // To do this, we have the peer which will broadcast a revoked commitment transaction send
3149         // a number of update_fail/commitment_signed updates without ever sending the RAA in
3150         // response to our commitment_signed. This is somewhat misbehavior-y, though not
3151         // technically disallowed and we should probably handle it reasonably.
3152         // Note that this is pretty exhaustive as an outbound HTLC which we haven't yet
3153         // failed/fulfilled backwards must be in at least one of the latest two remote commitment
3154         // transactions:
3155         // * Once we move it out of our holding cell/add it, we will immediately include it in a
3156         //   commitment_signed (implying it will be in the latest remote commitment transaction).
3157         // * Once they remove it, we will send a (the first) commitment_signed without the HTLC,
3158         //   and once they revoke the previous commitment transaction (allowing us to send a new
3159         //   commitment_signed) we will be free to fail/fulfill the HTLC backwards.
3160         let chanmon_cfgs = create_chanmon_cfgs(3);
3161         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3162         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3163         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3164
3165         // Create some initial channels
3166         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
3167         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
3168
3169         let (payment_preimage, _payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], if no_to_remote { 10_000 } else { 3_000_000 });
3170         // Get the will-be-revoked local txn from nodes[2]
3171         let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
3172         assert_eq!(revoked_local_txn[0].output.len(), if no_to_remote { 1 } else { 2 });
3173         // Revoke the old state
3174         claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
3175
3176         let value = if use_dust {
3177                 // The dust limit applied to HTLC outputs considers the fee of the HTLC transaction as
3178                 // well, so HTLCs at exactly the dust limit will not be included in commitment txn.
3179                 nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().holder_dust_limit_satoshis * 1000
3180         } else { 3000000 };
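             // With use_dust the three HTLCs below stay under the dust limit and never appear in commitment transactions; the backwards-fail behavior must be the same either way.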
3181
3182         let (_, first_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3183         let (_, second_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3184         let (_, third_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
3185
3186         assert!(nodes[2].node.fail_htlc_backwards(&first_payment_hash));
3187         expect_pending_htlcs_forwardable!(nodes[2]);
3188         check_added_monitors!(nodes[2], 1);
3189         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3190         assert!(updates.update_add_htlcs.is_empty());
3191         assert!(updates.update_fulfill_htlcs.is_empty());
3192         assert!(updates.update_fail_malformed_htlcs.is_empty());
3193         assert_eq!(updates.update_fail_htlcs.len(), 1);
3194         assert!(updates.update_fee.is_none());
3195         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3196         let bs_raa = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
3197         // Drop the last RAA from 3 -> 2
3198
3199         assert!(nodes[2].node.fail_htlc_backwards(&second_payment_hash));
3200         expect_pending_htlcs_forwardable!(nodes[2]);
3201         check_added_monitors!(nodes[2], 1);
3202         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3203         assert!(updates.update_add_htlcs.is_empty());
3204         assert!(updates.update_fulfill_htlcs.is_empty());
3205         assert!(updates.update_fail_malformed_htlcs.is_empty());
3206         assert_eq!(updates.update_fail_htlcs.len(), 1);
3207         assert!(updates.update_fee.is_none());
3208         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3209         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
3210         check_added_monitors!(nodes[1], 1);
3211         // Note that nodes[1] is in AwaitingRAA, so won't send a CS
3212         let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
3213         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
3214         check_added_monitors!(nodes[2], 1);
3215
3216         assert!(nodes[2].node.fail_htlc_backwards(&third_payment_hash));
3217         expect_pending_htlcs_forwardable!(nodes[2]);
3218         check_added_monitors!(nodes[2], 1);
3219         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3220         assert!(updates.update_add_htlcs.is_empty());
3221         assert!(updates.update_fulfill_htlcs.is_empty());
3222         assert!(updates.update_fail_malformed_htlcs.is_empty());
3223         assert_eq!(updates.update_fail_htlcs.len(), 1);
3224         assert!(updates.update_fee.is_none());
3225         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
3226         // At this point first_payment_hash has dropped out of the latest two commitment
3227         // transactions that nodes[1] is tracking...
3228         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
3229         check_added_monitors!(nodes[1], 1);
3230         // Note that nodes[1] is (still) in AwaitingRAA, so won't send a CS
3231         let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
3232         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
3233         check_added_monitors!(nodes[2], 1);
3234
3235         // Add a fourth HTLC, this one will get sequestered away in nodes[1]'s holding cell waiting
3236         // on nodes[2]'s RAA.
3237         let (_, fourth_payment_hash, fourth_payment_secret) = get_payment_preimage_hash!(nodes[2]);
3238         let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
3239         let logger = test_utils::TestLogger::new();
3240         let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
3241         nodes[1].node.send_payment(&route, fourth_payment_hash, &Some(fourth_payment_secret)).unwrap();
3242         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3243         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
3244         check_added_monitors!(nodes[1], 0);
3245
3246         if deliver_bs_raa {
3247                 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_raa);
3248                 // One monitor for the new revocation preimage, no second one as we won't generate a new
3249                 // commitment transaction for nodes[0] until process_pending_htlc_forwards().
3250                 check_added_monitors!(nodes[1], 1);
3251                 let events = nodes[1].node.get_and_clear_pending_events();
3252                 assert_eq!(events.len(), 1);
3253                 match events[0] {
3254                         Event::PendingHTLCsForwardable { .. } => { },
3255                         _ => panic!("Unexpected event"),
3256                 };
3257                 // Deliberately don't process the pending fail-back so they all fail back at once after
3258                 // block connection just like the !deliver_bs_raa case
3259         }
3260
3261         let mut failed_htlcs = HashSet::new();
3262         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
3263
3264         mine_transaction(&nodes[1], &revoked_local_txn[0]);
3265         check_added_monitors!(nodes[1], 1);
3266         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
3267
3268         let events = nodes[1].node.get_and_clear_pending_events();
3269         assert_eq!(events.len(), if deliver_bs_raa { 1 } else { 2 });
3270         match events[0] {
3271                 Event::PaymentFailed { ref payment_hash, .. } => {
3272                         assert_eq!(*payment_hash, fourth_payment_hash);
3273                 },
3274                 _ => panic!("Unexpected event"),
3275         }
3276         if !deliver_bs_raa {
3277                 match events[1] {
3278                         Event::PendingHTLCsForwardable { .. } => { },
3279                         _ => panic!("Unexpected event"),
3280                 };
3281         }
3282         nodes[1].node.process_pending_htlc_forwards();
3283         check_added_monitors!(nodes[1], 1);
3284
3285         let events = nodes[1].node.get_and_clear_pending_msg_events();
3286         assert_eq!(events.len(), if deliver_bs_raa { 4 } else { 3 });
3287         match events[if deliver_bs_raa { 1 } else { 0 }] {
3288                 MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {},
3289                 _ => panic!("Unexpected event"),
3290         }
3291         match events[if deliver_bs_raa { 2 } else { 1 }] {
3292                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { channel_id, ref data } }, node_id: _ } => {
3293                         assert_eq!(channel_id, chan_2.2);
3294                         assert_eq!(data.as_str(), "Commitment or closing transaction was confirmed on chain.");
3295                 },
3296                 _ => panic!("Unexpected event"),
3297         }
3298         if deliver_bs_raa {
3299                 match events[0] {
3300                         MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
3301                                 assert_eq!(nodes[2].node.get_our_node_id(), *node_id);
3302                                 assert_eq!(update_add_htlcs.len(), 1);
3303                                 assert!(update_fulfill_htlcs.is_empty());
3304                                 assert!(update_fail_htlcs.is_empty());
3305                                 assert!(update_fail_malformed_htlcs.is_empty());
3306                         },
3307                         _ => panic!("Unexpected event"),
3308                 }
3309         }
3310         match events[if deliver_bs_raa { 3 } else { 2 }] {
3311                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
3312                         assert!(update_add_htlcs.is_empty());
3313                         assert_eq!(update_fail_htlcs.len(), 3);
3314                         assert!(update_fulfill_htlcs.is_empty());
3315                         assert!(update_fail_malformed_htlcs.is_empty());
3316                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
3317
3318                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
3319                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[1]);
3320                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[2]);
3321
3322                         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
3323
3324                         let events = nodes[0].node.get_and_clear_pending_msg_events();
3325                         // If we delivered B's RAA we got an unknown preimage error, not something
3326                         // that we should update our routing table for.
3327                         assert_eq!(events.len(), if deliver_bs_raa { 2 } else { 3 });
3328                         for event in events {
3329                                 match event {
3330                                         MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
3331                                         _ => panic!("Unexpected event"),
3332                                 }
3333                         }
3334                         let events = nodes[0].node.get_and_clear_pending_events();
3335                         assert_eq!(events.len(), 3);
3336                         match events[0] {
3337                                 Event::PaymentFailed { ref payment_hash, .. } => {
3338                                         assert!(failed_htlcs.insert(payment_hash.0));
3339                                 },
3340                                 _ => panic!("Unexpected event"),
3341                         }
3342                         match events[1] {
3343                                 Event::PaymentFailed { ref payment_hash, .. } => {
3344                                         assert!(failed_htlcs.insert(payment_hash.0));
3345                                 },
3346                                 _ => panic!("Unexpected event"),
3347                         }
3348                         match events[2] {
3349                                 Event::PaymentFailed { ref payment_hash, .. } => {
3350                                         assert!(failed_htlcs.insert(payment_hash.0));
3351                                 },
3352                                 _ => panic!("Unexpected event"),
3353                         }
3354                 },
3355                 _ => panic!("Unexpected event"),
3356         }
3357
3358         assert!(failed_htlcs.contains(&first_payment_hash.0));
3359         assert!(failed_htlcs.contains(&second_payment_hash.0));
3360         assert!(failed_htlcs.contains(&third_payment_hash.0));
3361 }
3362
3363 #[test]
3364 fn test_commitment_revoked_fail_backward_exhaustive_a() {
3365         do_test_commitment_revoked_fail_backward_exhaustive(false, true, false);
3366         do_test_commitment_revoked_fail_backward_exhaustive(true, true, false);
3367         do_test_commitment_revoked_fail_backward_exhaustive(false, false, false);
3368         do_test_commitment_revoked_fail_backward_exhaustive(true, false, false);
3369 }
3370
3371 #[test]
3372 fn test_commitment_revoked_fail_backward_exhaustive_b() {
3373         do_test_commitment_revoked_fail_backward_exhaustive(false, true, true);
3374         do_test_commitment_revoked_fail_backward_exhaustive(true, true, true);
3375         do_test_commitment_revoked_fail_backward_exhaustive(false, false, true);
3376         do_test_commitment_revoked_fail_backward_exhaustive(true, false, true);
3377 }
3378
3379 #[test]
3380 fn fail_backward_pending_htlc_upon_channel_failure() {
3381         let chanmon_cfgs = create_chanmon_cfgs(2);
3382         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3383         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3384         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3385         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000, InitFeatures::known(), InitFeatures::known());
3386         let logger = test_utils::TestLogger::new();
3387
3388         // Alice -> Bob: Route a payment but without Bob sending revoke_and_ack.
3389         {
3390                 let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[1]);
3391                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
3392                 let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 50_000, TEST_FINAL_CLTV, &logger).unwrap();
3393                 nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
3394                 check_added_monitors!(nodes[0], 1);
3395
3396                 let payment_event = {
3397                         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3398                         assert_eq!(events.len(), 1);
3399                         SendEvent::from_event(events.remove(0))
3400                 };
3401                 assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
3402                 assert_eq!(payment_event.msgs.len(), 1);
3403         }
3404
3405         // Alice -> Bob: Route another payment but now Alice waits for Bob's earlier revoke_and_ack.
3406         let (_, failed_payment_hash, failed_payment_secret) = get_payment_preimage_hash!(nodes[1]);
3407         {
3408                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
3409                 let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 50_000, TEST_FINAL_CLTV, &logger).unwrap();
3410                 nodes[0].node.send_payment(&route, failed_payment_hash, &Some(failed_payment_secret)).unwrap();
3411                 check_added_monitors!(nodes[0], 0);
3412
3413                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
3414         }
3415
3416         // Alice <- Bob: Send a malformed update_add_htlc so Alice fails the channel.
3417         {
3418                 let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[0]);
3419
3420                 let secp_ctx = Secp256k1::new();
3421                 let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
3422                 let current_height = nodes[1].node.best_block.read().unwrap().height() + 1;
3423                 let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
3424                 let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 50_000, TEST_FINAL_CLTV, &logger).unwrap();
3425                 let (onion_payloads, _amount_msat, cltv_expiry) = onion_utils::build_onion_payloads(&route.paths[0], 50_000, &Some(payment_secret), current_height, &None).unwrap();
3426                 let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
3427                 let onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash);
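                     // The onion itself is valid; the only invalid part of the HTLC below is its 0 msat amount.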
3428
3429                 // Send a 0-msat update_add_htlc to fail the channel.
3430                 let update_add_htlc = msgs::UpdateAddHTLC {
3431                         channel_id: chan.2,
3432                         htlc_id: 0,
3433                         amount_msat: 0,
3434                         payment_hash,
3435                         cltv_expiry,
3436                         onion_routing_packet,
3437                 };
3438                 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_htlc);
3439         }
3440
3441         // Check that Alice fails backward the pending HTLC from the second payment.
3442         expect_payment_failed!(nodes[0], failed_payment_hash, true);
3443         check_closed_broadcast!(nodes[0], true);
3444         check_added_monitors!(nodes[0], 1);
3445 }
3446
3447 #[test]
3448 fn test_htlc_ignore_latest_remote_commitment() {
3449         // Test that HTLC transactions spending the latest remote commitment transaction are simply
3450         // ignored if we cannot claim them. This originally tickled an invalid unwrap().
3451         let chanmon_cfgs = create_chanmon_cfgs(2);
3452         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3453         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3454         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3455         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
3456
3457         route_payment(&nodes[0], &[&nodes[1]], 10000000);
3458         nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
3459         connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
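             // Connect enough blocks for the HTLC to expire so the HTLC-timeout claim is broadcast alongside the commitment tx.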
3460         check_closed_broadcast!(nodes[0], true);
3461         check_added_monitors!(nodes[0], 1);
3462
3463         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
3464         assert_eq!(node_txn.len(), 3);
3465         assert_eq!(node_txn[0], node_txn[1]);
3466
3467         let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
3468         connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[1].clone()]});
3469         check_closed_broadcast!(nodes[1], true);
3470         check_added_monitors!(nodes[1], 1);
3471
3472         // Duplicate the connect_block call since this may happen due to other listeners
3473         // registering new transactions
3474         header.prev_blockhash = header.block_hash();
3475         connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[2].clone()]});
3476 }
3477
3478 #[test]
3479 fn test_force_close_fail_back() {
3480         // Check which HTLCs are failed-backwards on channel force-closure
3481         let chanmon_cfgs = create_chanmon_cfgs(3);
3482         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3483         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3484         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3485         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
3486         create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
3487         let logger = test_utils::TestLogger::new();
3488
3489         let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]);
3490
3491         let mut payment_event = {
3492                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
3493                 let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, 42, &logger).unwrap();
3494                 nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
3495                 check_added_monitors!(nodes[0], 1);
3496
3497                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3498                 assert_eq!(events.len(), 1);
3499                 SendEvent::from_event(events.remove(0))
3500         };
3501
3502         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
3503         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
3504
3505         expect_pending_htlcs_forwardable!(nodes[1]);
3506
3507         let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
3508         assert_eq!(events_2.len(), 1);
3509         payment_event = SendEvent::from_event(events_2.remove(0));
3510         assert_eq!(payment_event.msgs.len(), 1);
3511
3512         check_added_monitors!(nodes[1], 1);
3513         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
3514         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg);
3515         check_added_monitors!(nodes[2], 1);
3516         let (_, _) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
3517
3518         // nodes[2] now has the latest commitment transaction, but hasn't revoked its previous
3519         // state or updated nodes[1]'s state. Now force-close and broadcast that commitment/HTLC
3520         // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
3521
3522         nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id).unwrap();
3523         check_closed_broadcast!(nodes[2], true);
3524         check_added_monitors!(nodes[2], 1);
3525         let tx = {
3526                 let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
3527                 // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
3528                 // have a use for it unless nodes[2] learns the preimage somehow; otherwise the funds will
3529                 // go back to nodes[1] upon timeout.
3530                 assert_eq!(node_txn.len(), 1);
3531                 node_txn.remove(0)
3532         };
3533
3534         mine_transaction(&nodes[1], &tx);
3535
3536         // Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
3537         check_closed_broadcast!(nodes[1], true);
3538         check_added_monitors!(nodes[1], 1);
3539
3540         // Now check that if we add the preimage to the ChannelMonitor it broadcasts our HTLC-Success...
3541         {
3542                 let mut monitors = nodes[2].chain_monitor.chain_monitor.monitors.read().unwrap();
3543                 monitors.get(&OutPoint{ txid: Txid::from_slice(&payment_event.commitment_msg.channel_id[..]).unwrap(), index: 0 }).unwrap()
3544                         .provide_payment_preimage(&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &node_cfgs[2].fee_estimator, &&logger);
3545         }
3546         mine_transaction(&nodes[2], &tx);
3547         let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
3548         assert_eq!(node_txn.len(), 1);
3549         assert_eq!(node_txn[0].input.len(), 1);
3550         assert_eq!(node_txn[0].input[0].previous_output.txid, tx.txid());
3551         assert_eq!(node_txn[0].lock_time, 0); // Must be an HTLC-Success
3552         assert_eq!(node_txn[0].input[0].witness.len(), 5); // Must be an HTLC-Success
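             // Both second-stage HTLC transactions carry five witness items per BOLT 3 (the dummy element for
             // OP_CHECKMULTISIG, two signatures, the preimage slot, and the witness script); the zero lock_time
             // is what distinguishes HTLC-Success from HTLC-Timeout, which sets lock_time to the HTLC's cltv_expiry.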
3553
3554         check_spends!(node_txn[0], tx);
3555 }
3556
3557 #[test]
3558 fn test_dup_events_on_peer_disconnect() {
3559         // Test that if we receive a duplicate update_fulfill_htlc message after a reconnect we do not
3560         // generate a second PaymentSent event. This was not always the case: we used to generate the
3561         // event immediately upon receipt of the payment preimage in the update_fulfill_htlc
3562         // message.
3563
3564         let chanmon_cfgs = create_chanmon_cfgs(2);
3565         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3566         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3567         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3568         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
3569
3570         let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 1000000).0;
3571
3572         assert!(nodes[1].node.claim_funds(payment_preimage));
3573         check_added_monitors!(nodes[1], 1);
3574         let claim_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
3575         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]);
3576         expect_payment_sent!(nodes[0], payment_preimage);
3577
3578         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
3579         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
3580
3581         reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
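             // reconnect_nodes re-delivers nodes[1]'s update_fulfill_htlc here; since the PaymentSent event was
             // already generated above, no new event should be surfaced.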
3582         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
3583 }
3584
3585 #[test]
3586 fn test_simple_peer_disconnect() {
3587         // Test that we can reconnect when there are no lost messages
3588         let chanmon_cfgs = create_chanmon_cfgs(3);
3589         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
3590         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
3591         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
3592         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
3593         create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
3594
3595         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
3596         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
3597         reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
3598
3599         let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
3600         let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
3601         fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_2);
3602         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_1);
3603
3604         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
3605         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
3606         reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
3607
3608         let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
3609         let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
3610         let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
3611         let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
3612
3613         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
3614         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
3615
3616         claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_preimage_3);
3617         fail_payment_along_route(&nodes[0], &[&nodes[1], &nodes[2]], true, payment_hash_5);
3618
3619         reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (1, 0), (1, 0), (false, false));
3620         {
3621                 let events = nodes[0].node.get_and_clear_pending_events();
3622                 assert_eq!(events.len(), 2);
3623                 match events[0] {
3624                         Event::PaymentSent { payment_preimage } => {
3625                                 assert_eq!(payment_preimage, payment_preimage_3);
3626                         },
3627                         _ => panic!("Unexpected event"),
3628                 }
3629                 match events[1] {
3630                         Event::PaymentFailed { payment_hash, rejected_by_dest, .. } => {
3631                                 assert_eq!(payment_hash, payment_hash_5);
3632                                 assert!(rejected_by_dest);
3633                         },
3634                         _ => panic!("Unexpected event"),
3635                 }
3636         }
3637
3638         claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4);
3639         fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_6);
3640 }
3641
3642 fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken_lnd: bool) {
3643         // Test that we can reconnect when in-flight HTLC updates get dropped
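             // messages_delivered maps to how far the first payment's commitment dance got before the disconnect:
             // 0 => nodes[1] never received funding_locked, 1 => channel set up but the update_add_htlc is dropped,
             // 2 => update_add delivered, 3 => + commitment_signed, 4 => + nodes[1]'s RAA, 5 => + nodes[1]'s
             // commitment_signed, 6 => everything including the final RAA was delivered.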
3644         let chanmon_cfgs = create_chanmon_cfgs(2);
3645         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3646         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3647         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3648
3649         let mut as_funding_locked = None;
3650         if messages_delivered == 0 {
3651                 let (funding_locked, _, _) = create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001, InitFeatures::known(), InitFeatures::known());
3652                 as_funding_locked = Some(funding_locked);
3653                 // nodes[1] doesn't receive the funding_locked message (it'll be re-sent on reconnect)
3654                 // Note that we store it so that if we're running with `simulate_broken_lnd` we can deliver
3655                 // it before the channel_reestablish message.
3656         } else {
3657                 create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
3658         }
3659
3660         let (payment_preimage_1, payment_hash_1, payment_secret_1) = get_payment_preimage_hash!(nodes[1]);
3661
3662         let logger = test_utils::TestLogger::new();
3663         let payment_event = {
3664                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
3665                 let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(),
3666                         &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), Some(&nodes[0].node.list_usable_channels().iter().collect::<Vec<_>>()),
3667                         &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
3668                 nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)).unwrap();
3669                 check_added_monitors!(nodes[0], 1);
3670
3671                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
3672                 assert_eq!(events.len(), 1);
3673                 SendEvent::from_event(events.remove(0))
3674         };
3675         assert_eq!(nodes[1].node.get_our_node_id(), payment_event.node_id);
3676
3677         if messages_delivered < 2 {
3678                 // Drop the payment_event messages, and let them get re-generated in reconnect_nodes!
3679         } else {
3680                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
3681                 if messages_delivered >= 3 {
3682                         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
3683                         check_added_monitors!(nodes[1], 1);
3684                         let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
3685
3686                         if messages_delivered >= 4 {
3687                                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
3688                                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
3689                                 check_added_monitors!(nodes[0], 1);
3690
3691                                 if messages_delivered >= 5 {
3692                                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
3693                                         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
3694                                         // No commitment_signed so get_event_msg's assert(len == 1) passes
3695                                         check_added_monitors!(nodes[0], 1);
3696
3697                                         if messages_delivered >= 6 {
3698                                                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
3699                                                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3700                                                 check_added_monitors!(nodes[1], 1);
3701                                         }
3702                                 }
3703                         }
3704                 }
3705         }
3706
3707         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
3708         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
3709         if messages_delivered < 3 {
3710                 if simulate_broken_lnd {
3711                         // lnd has a long-standing bug where they send a funding_locked prior to a
3712                         // channel_reestablish if you reconnect before funding_locked has been exchanged.
3713                         //
3714                         // Here we simulate that behavior, delivering a funding_locked immediately on
3715                         // reconnect. Note that we don't bother skipping the now-duplicate funding_locked sent
3716                         // in `reconnect_nodes` but we currently don't fail based on that.
3717                         //
3718                         // See-also <https://github.com/lightningnetwork/lnd/issues/4006>
3719                         nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &as_funding_locked.as_ref().unwrap().0);
3720                 }
3721                 // Even if the funding_locked messages get exchanged, as long as nothing further was
3722                 // received on either side, both sides will need to resend them.
3723                 reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 1), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
3724         } else if messages_delivered == 3 {
3725                 // nodes[0] still wants its RAA + commitment_signed
3726                 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
3727         } else if messages_delivered == 4 {
3728                 // nodes[0] still wants its commitment_signed
3729                 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
3730         } else if messages_delivered == 5 {
3731                 // nodes[1] still wants its final RAA
3732                 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
3733         } else if messages_delivered == 6 {
3734                 // Everything was delivered...
3735                 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
3736         }
3737
3738         let events_1 = nodes[1].node.get_and_clear_pending_events();
3739         assert_eq!(events_1.len(), 1);
3740         match events_1[0] {
3741                 Event::PendingHTLCsForwardable { .. } => { },
3742                 _ => panic!("Unexpected event"),
3743         };
3744
3745         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
3746         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
3747         reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
3748
3749         nodes[1].node.process_pending_htlc_forwards();
3750
3751         let events_2 = nodes[1].node.get_and_clear_pending_events();
3752         assert_eq!(events_2.len(), 1);
3753         match events_2[0] {
3754                 Event::PaymentReceived { ref payment_hash, ref purpose, amt } => {
3755                         assert_eq!(payment_hash_1, *payment_hash);
3756                         assert_eq!(amt, 1000000);
3757                         match &purpose {
3758                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
3759                                         assert!(payment_preimage.is_none());
3760                                         assert_eq!(payment_secret_1, *payment_secret);
3761                                 },
3762                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
3763                         }
3764                 },
3765                 _ => panic!("Unexpected event"),
3766         }
3767
3768         nodes[1].node.claim_funds(payment_preimage_1);
3769         check_added_monitors!(nodes[1], 1);
3770
3771         let events_3 = nodes[1].node.get_and_clear_pending_msg_events();
3772         assert_eq!(events_3.len(), 1);
3773         let (update_fulfill_htlc, commitment_signed) = match events_3[0] {
3774                 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
3775                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
3776                         assert!(updates.update_add_htlcs.is_empty());
3777                         assert!(updates.update_fail_htlcs.is_empty());
3778                         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
3779                         assert!(updates.update_fail_malformed_htlcs.is_empty());
3780                         assert!(updates.update_fee.is_none());
3781                         (updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone())
3782                 },
3783                 _ => panic!("Unexpected event"),
3784         };
3785
3786         if messages_delivered >= 1 {
3787                 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlc);
3788
3789                 let events_4 = nodes[0].node.get_and_clear_pending_events();
3790                 assert_eq!(events_4.len(), 1);
3791                 match events_4[0] {
3792                         Event::PaymentSent { ref payment_preimage } => {
3793                                 assert_eq!(payment_preimage_1, *payment_preimage);
3794                         },
3795                         _ => panic!("Unexpected event"),
3796                 }
3797
3798                 if messages_delivered >= 2 {
3799                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
3800                         check_added_monitors!(nodes[0], 1);
3801                         let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
3802
3803                         if messages_delivered >= 3 {
3804                                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
3805                                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
3806                                 check_added_monitors!(nodes[1], 1);
3807
3808                                 if messages_delivered >= 4 {
3809                                         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed);
3810                                         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
3811                                         // No commitment_signed so get_event_msg's assert(len == 1) passes
3812                                         check_added_monitors!(nodes[1], 1);
3813
3814                                         if messages_delivered >= 5 {
3815                                                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
3816                                                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
3817                                                 check_added_monitors!(nodes[0], 1);
3818                                         }
3819                                 }
3820                         }
3821                 }
3822         }
3823
3824         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
3825         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
3826         if messages_delivered < 2 {
3827                 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
3828                 if messages_delivered < 1 {
3829                         let events_4 = nodes[0].node.get_and_clear_pending_events();
3830                         assert_eq!(events_4.len(), 1);
3831                         match events_4[0] {
3832                                 Event::PaymentSent { ref payment_preimage } => {
3833                                         assert_eq!(payment_preimage_1, *payment_preimage);
3834                                 },
3835                                 _ => panic!("Unexpected event"),
3836                         }
3837                 } else {
3838                         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
3839                 }
3840         } else if messages_delivered == 2 {
3841                 // nodes[0] still wants its RAA + commitment_signed
3842                 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
3843         } else if messages_delivered == 3 {
3844                 // nodes[0] still wants its commitment_signed
3845                 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
3846         } else if messages_delivered == 4 {
3847                 // nodes[1] still wants its final RAA
3848                 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
3849         } else if messages_delivered == 5 {
3850                 // Everything was delivered...
3851                 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
3852         }
3853
3854         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
3855         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
3856         reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
3857
3858         // Channel should still work fine...
3859         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
3860         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(),
3861                 &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), Some(&nodes[0].node.list_usable_channels().iter().collect::<Vec<_>>()),
3862                 &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
3863         let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0;
3864         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
3865 }
3866
3867 #[test]
3868 fn test_drop_messages_peer_disconnect_a() {
3869         do_test_drop_messages_peer_disconnect(0, true);
3870         do_test_drop_messages_peer_disconnect(0, false);
3871         do_test_drop_messages_peer_disconnect(1, false);
3872         do_test_drop_messages_peer_disconnect(2, false);
3873 }
3874
3875 #[test]
3876 fn test_drop_messages_peer_disconnect_b() {
3877         do_test_drop_messages_peer_disconnect(3, false);
3878         do_test_drop_messages_peer_disconnect(4, false);
3879         do_test_drop_messages_peer_disconnect(5, false);
3880         do_test_drop_messages_peer_disconnect(6, false);
3881 }
3882
3883 #[test]
3884 fn test_funding_peer_disconnect() {
3885         // Test that we can lock in our funding tx while disconnected
3886         let chanmon_cfgs = create_chanmon_cfgs(2);
3887         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
3888         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
3889         let persister: test_utils::TestPersister;
3890         let new_chain_monitor: test_utils::TestChainMonitor;
3891         let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
3892         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
3893         let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, InitFeatures::known(), InitFeatures::known());
3894
3895         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
3896         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
3897
3898         confirm_transaction(&nodes[0], &tx);
3899         let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
3900         assert_eq!(events_1.len(), 1);
3901         match events_1[0] {
3902                 MessageSendEvent::SendFundingLocked { ref node_id, msg: _ } => {
3903                         assert_eq!(*node_id, nodes[1].node.get_our_node_id());
3904                 },
3905                 _ => panic!("Unexpected event"),
3906         }
3907
3908         reconnect_nodes(&nodes[0], &nodes[1], (false, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
3909
3910         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
3911         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
3912
3913         confirm_transaction(&nodes[1], &tx);
3914         let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
3915         assert_eq!(events_2.len(), 2);
3916         let funding_locked = match events_2[0] {
3917                 MessageSendEvent::SendFundingLocked { ref node_id, ref msg } => {
3918                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
3919                         msg.clone()
3920                 },
3921                 _ => panic!("Unexpected event"),
3922         };
3923         let bs_announcement_sigs = match events_2[1] {
3924                 MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
3925                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
3926                         msg.clone()
3927                 },
3928                 _ => panic!("Unexpected event"),
3929         };
3930
3931         reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
3932
3933         nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &funding_locked);
3934         nodes[0].node.handle_announcement_signatures(&nodes[1].node.get_our_node_id(), &bs_announcement_sigs);
3935         let events_3 = nodes[0].node.get_and_clear_pending_msg_events();
3936         assert_eq!(events_3.len(), 2);
3937         let as_announcement_sigs = match events_3[0] {
3938                 MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
3939                         assert_eq!(*node_id, nodes[1].node.get_our_node_id());
3940                         msg.clone()
3941                 },
3942                 _ => panic!("Unexpected event"),
3943         };
3944         let (as_announcement, as_update) = match events_3[1] {
3945                 MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
3946                         (msg.clone(), update_msg.clone())
3947                 },
3948                 _ => panic!("Unexpected event"),
3949         };
3950
3951         nodes[1].node.handle_announcement_signatures(&nodes[0].node.get_our_node_id(), &as_announcement_sigs);
3952         let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
3953         assert_eq!(events_4.len(), 1);
3954         let (_, bs_update) = match events_4[0] {
3955                 MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
3956                         (msg.clone(), update_msg.clone())
3957                 },
3958                 _ => panic!("Unexpected event"),
3959         };
3960
3961         nodes[0].net_graph_msg_handler.handle_channel_announcement(&as_announcement).unwrap();
3962         nodes[0].net_graph_msg_handler.handle_channel_update(&bs_update).unwrap();
3963         nodes[0].net_graph_msg_handler.handle_channel_update(&as_update).unwrap();
3964
3965         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
3966         let logger = test_utils::TestLogger::new();
3967         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
3968         let (payment_preimage, _, _) = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000);
3969         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
3970
3971         // Check that after deserialization and reconnection we can still generate an identical
3972         // channel_announcement from the cached signatures.
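             // We serialize nodes[0]'s ChannelManager and ChannelMonitor, reload them via ChannelManagerReadArgs
             // against fresh monitoring/persistence objects, and swap the reloaded copies into nodes[0].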
3973         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
3974
3975         let nodes_0_serialized = nodes[0].node.encode();
3976         let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
3977         nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
3978
3979         persister = test_utils::TestPersister::new();
3980         let keys_manager = &chanmon_cfgs[0].keys_manager;
3981         new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), nodes[0].logger, node_cfgs[0].fee_estimator, &persister, keys_manager);
3982         nodes[0].chain_monitor = &new_chain_monitor;
3983         let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
3984         let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
3985                 &mut chan_0_monitor_read, keys_manager).unwrap();
3986         assert!(chan_0_monitor_read.is_empty());
3987
3988         let mut nodes_0_read = &nodes_0_serialized[..];
3989         let (_, nodes_0_deserialized_tmp) = {
3990                 let mut channel_monitors = HashMap::new();
3991                 channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
3992                 <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
3993                         default_config: UserConfig::default(),
3994                         keys_manager,
3995                         fee_estimator: node_cfgs[0].fee_estimator,
3996                         chain_monitor: nodes[0].chain_monitor,
3997                         tx_broadcaster: nodes[0].tx_broadcaster.clone(),
3998                         logger: nodes[0].logger,
3999                         channel_monitors,
4000                 }).unwrap()
4001         };
4002         nodes_0_deserialized = nodes_0_deserialized_tmp;
4003         assert!(nodes_0_read.is_empty());
4004
4005         assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
4006         nodes[0].node = &nodes_0_deserialized;
4007         check_added_monitors!(nodes[0], 1);
4008
4009         reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
4010
4011         // as_announcement should be re-generated exactly by broadcast_node_announcement.
4012         nodes[0].node.broadcast_node_announcement([0, 0, 0], [0; 32], Vec::new());
4013         let msgs = nodes[0].node.get_and_clear_pending_msg_events();
4014         let mut found_announcement = false;
4015         for event in msgs.iter() {
4016                 match event {
4017                         MessageSendEvent::BroadcastChannelAnnouncement { ref msg, .. } => {
4018                                 if *msg == as_announcement { found_announcement = true; }
4019                         },
4020                         MessageSendEvent::BroadcastNodeAnnouncement { .. } => {},
4021                         _ => panic!("Unexpected event"),
4022                 }
4023         }
4024         assert!(found_announcement);
4025 }
4026
4027 #[test]
4028 fn test_drop_messages_peer_disconnect_dual_htlc() {
4029         // Test that we can handle reconnecting when both sides of a channel have pending
4030         // commitment_updates when we disconnect.
4031         let chanmon_cfgs = create_chanmon_cfgs(2);
4032         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4033         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4034         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4035         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
4036         let logger = test_utils::TestLogger::new();
4037
4038         let (payment_preimage_1, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
4039
4040         // Now send a second payment; its update_add_htlc will still be un-delivered when we disconnect.
4041         let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]);
4042         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
4043         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
4044         nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
4045         check_added_monitors!(nodes[0], 1);
4046
4047         let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
4048         assert_eq!(events_1.len(), 1);
4049         match events_1[0] {
4050                 MessageSendEvent::UpdateHTLCs { .. } => {},
4051                 _ => panic!("Unexpected event"),
4052         }
4053
4054         assert!(nodes[1].node.claim_funds(payment_preimage_1));
4055         check_added_monitors!(nodes[1], 1);
4056
4057         let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
4058         assert_eq!(events_2.len(), 1);
4059         match events_2[0] {
4060                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
4061                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
4062                         assert!(update_add_htlcs.is_empty());
4063                         assert_eq!(update_fulfill_htlcs.len(), 1);
4064                         assert!(update_fail_htlcs.is_empty());
4065                         assert!(update_fail_malformed_htlcs.is_empty());
4066                         assert!(update_fee.is_none());
4067
4068                         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
4069                         let events_3 = nodes[0].node.get_and_clear_pending_events();
4070                         assert_eq!(events_3.len(), 1);
4071                         match events_3[0] {
4072                                 Event::PaymentSent { ref payment_preimage } => {
4073                                         assert_eq!(*payment_preimage, payment_preimage_1);
4074                                 },
4075                                 _ => panic!("Unexpected event"),
4076                         }
4077
4078                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
4079                         let _ = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4080                         // No commitment_signed so get_event_msg's assert(len == 1) passes
4081                         check_added_monitors!(nodes[0], 1);
4082                 },
4083                 _ => panic!("Unexpected event"),
4084         }
4085
4086         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
4087         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
4088
4089         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
4090         let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
4091         assert_eq!(reestablish_1.len(), 1);
4092         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
4093         let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
4094         assert_eq!(reestablish_2.len(), 1);
4095
4096         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
4097         let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
4098         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
4099         let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
4100
4101         assert!(as_resp.0.is_none());
4102         assert!(bs_resp.0.is_none());
4103
4104         assert!(bs_resp.1.is_none());
4105         assert!(bs_resp.2.is_none());
4106
4107         assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
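             // nodes[0] must retransmit both an RAA and a commitment update carrying the un-delivered
             // update_add_htlc, and indicates the commitment update should be handled first.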
4108
4109         assert_eq!(as_resp.2.as_ref().unwrap().update_add_htlcs.len(), 1);
4110         assert!(as_resp.2.as_ref().unwrap().update_fulfill_htlcs.is_empty());
4111         assert!(as_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
4112         assert!(as_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
4113         assert!(as_resp.2.as_ref().unwrap().update_fee.is_none());
4114         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().update_add_htlcs[0]);
4115         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed);
4116         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4117         // No commitment_signed so get_event_msg's assert(len == 1) passes
4118         check_added_monitors!(nodes[1], 1);
4119
4120         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), as_resp.1.as_ref().unwrap());
4121         let bs_second_commitment_signed = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4122         assert!(bs_second_commitment_signed.update_add_htlcs.is_empty());
4123         assert!(bs_second_commitment_signed.update_fulfill_htlcs.is_empty());
4124         assert!(bs_second_commitment_signed.update_fail_htlcs.is_empty());
4125         assert!(bs_second_commitment_signed.update_fail_malformed_htlcs.is_empty());
4126         assert!(bs_second_commitment_signed.update_fee.is_none());
4127         check_added_monitors!(nodes[1], 1);
4128
4129         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
4130         let as_commitment_signed = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
4131         assert!(as_commitment_signed.update_add_htlcs.is_empty());
4132         assert!(as_commitment_signed.update_fulfill_htlcs.is_empty());
4133         assert!(as_commitment_signed.update_fail_htlcs.is_empty());
4134         assert!(as_commitment_signed.update_fail_malformed_htlcs.is_empty());
4135         assert!(as_commitment_signed.update_fee.is_none());
4136         check_added_monitors!(nodes[0], 1);
4137
4138         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_signed.commitment_signed);
4139         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
4140         // No commitment_signed so get_event_msg's assert(len == 1) passes
4141         check_added_monitors!(nodes[0], 1);
4142
4143         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed);
4144         let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
4145         // No commitment_signed so get_event_msg's assert(len == 1) passes
4146         check_added_monitors!(nodes[1], 1);
4147
4148         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
4149         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4150         check_added_monitors!(nodes[1], 1);
4151
4152         expect_pending_htlcs_forwardable!(nodes[1]);
4153
4154         let events_5 = nodes[1].node.get_and_clear_pending_events();
4155         assert_eq!(events_5.len(), 1);
4156         match events_5[0] {
4157                 Event::PaymentReceived { ref payment_hash, ref purpose, .. } => {
4158                         assert_eq!(payment_hash_2, *payment_hash);
4159                         match &purpose {
4160                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
4161                                         assert!(payment_preimage.is_none());
4162                                         assert_eq!(payment_secret_2, *payment_secret);
4163                                 },
4164                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
4165                         }
4166                 },
4167                 _ => panic!("Unexpected event"),
4168         }
4169
4170         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack);
4171         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4172         check_added_monitors!(nodes[0], 1);
4173
4174         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
4175 }
4176
4177 fn do_test_htlc_timeout(send_partial_mpp: bool) {
4178         // If the user fails to claim/fail an HTLC within the HTLC CLTV timeout we fail it for them
4179         // to avoid our counterparty failing the channel.
4180         let chanmon_cfgs = create_chanmon_cfgs(2);
4181         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4182         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4183         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4184
4185         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
4186         let logger = test_utils::TestLogger::new();
4187
4188         let our_payment_hash = if send_partial_mpp {
4189                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
4190                 let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100000, TEST_FINAL_CLTV, &logger).unwrap();
4191                 let (_, our_payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[1]);
4192                 // Use the utility function send_payment_along_path to send the payment with MPP data which
4193                 // indicates there are more HTLCs coming.
4194                 let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
4195                 nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200000, cur_height, &None).unwrap();
4196                 check_added_monitors!(nodes[0], 1);
4197                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
4198                 assert_eq!(events.len(), 1);
4199                 // Now do the relevant commitment_signed/RAA dances along the path, noting that the final
4200                 // hop should *not* yet generate any PaymentReceived event(s).
4201                 pass_along_path(&nodes[0], &[&nodes[1]], 100000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
4202                 our_payment_hash
4203         } else {
4204                 route_payment(&nodes[0], &[&nodes[1]], 100000).1
4205         };
4206
4207         let mut block = Block {
4208                 header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
4209                 txdata: vec![],
4210         };
4211         connect_block(&nodes[0], &block);
4212         connect_block(&nodes[1], &block);
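             // Connect blocks until the HTLC is within CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS of its
             // expiry, the point at which nodes[1] fails the still-unclaimed HTLC back rather than risk its
             // counterparty force-closing on-chain.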
4213         let block_count = TEST_FINAL_CLTV + CHAN_CONFIRM_DEPTH + 2 - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS;
4214         for _ in CHAN_CONFIRM_DEPTH + 2..block_count {
4215                 block.header.prev_blockhash = block.block_hash();
4216                 connect_block(&nodes[0], &block);
4217                 connect_block(&nodes[1], &block);
4218         }
4219
4220         expect_pending_htlcs_forwardable!(nodes[1]);
4221
4222         check_added_monitors!(nodes[1], 1);
4223         let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
4224         assert!(htlc_timeout_updates.update_add_htlcs.is_empty());
4225         assert_eq!(htlc_timeout_updates.update_fail_htlcs.len(), 1);
4226         assert!(htlc_timeout_updates.update_fail_malformed_htlcs.is_empty());
4227         assert!(htlc_timeout_updates.update_fee.is_none());
4228
4229         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_timeout_updates.update_fail_htlcs[0]);
4230         commitment_signed_dance!(nodes[0], nodes[1], htlc_timeout_updates.commitment_signed, false);
4231         // 100_000 msat as u64, followed by the height at which we failed back above
4232         let mut expected_failure_data = byte_utils::be64_to_array(100_000).to_vec();
4233         expected_failure_data.extend_from_slice(&byte_utils::be32_to_array(block_count - 1));
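             // 0x4000 | 15 is PERM|incorrect_or_unknown_payment_details, whose BOLT 4 failure data carries the
             // htlc_msat and height built into expected_failure_data above.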
4234         expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000 | 15, &expected_failure_data[..]);
4235 }
4236
4237 #[test]
4238 fn test_htlc_timeout() {
4239         do_test_htlc_timeout(true);
4240         do_test_htlc_timeout(false);
4241 }
4242
4243 fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) {
4244         // Tests that HTLCs in the holding cell are timed out after the requisite number of blocks.
4245         let chanmon_cfgs = create_chanmon_cfgs(3);
4246         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
4247         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
4248         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
4249         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
4250         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
4251
4252         // Make sure all nodes are at the same starting height
4253         connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
4254         connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
4255         connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
4256
4257         let logger = test_utils::TestLogger::new();
4258
4259         // Route a first payment to get the 1 -> 2 channel in awaiting_raa...
4260         let (_, first_payment_hash, first_payment_secret) = get_payment_preimage_hash!(nodes[2]);
4261         {
4262                 let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
4263                 let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100000, TEST_FINAL_CLTV, &logger).unwrap();
4264                 nodes[1].node.send_payment(&route, first_payment_hash, &Some(first_payment_secret)).unwrap();
4265         }
4266         assert_eq!(nodes[1].node.get_and_clear_pending_msg_events().len(), 1);
4267         check_added_monitors!(nodes[1], 1);
4268
4269         // Now attempt to route a second payment, which should be placed in the holding cell
4270         let (_, second_payment_hash, second_payment_secret) = get_payment_preimage_hash!(nodes[2]);
4271         if forwarded_htlc {
4272                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
4273                 let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100000, TEST_FINAL_CLTV, &logger).unwrap();
4274                 nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret)).unwrap();
4275                 check_added_monitors!(nodes[0], 1);
4276                 let payment_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
4277                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
4278                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
4279                 expect_pending_htlcs_forwardable!(nodes[1]);
4280                 check_added_monitors!(nodes[1], 0);
4281         } else {
4282                 let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
4283                 let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100000, TEST_FINAL_CLTV, &logger).unwrap();
4284                 nodes[1].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret)).unwrap();
4285                 check_added_monitors!(nodes[1], 0);
4286         }
4287
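             // Connect blocks to one short of the holding-cell timeout threshold; nothing should happen yet, and
             // the next block then times out the held HTLC.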
4288         connect_blocks(&nodes[1], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS);
4289         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4290         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
4291         connect_blocks(&nodes[1], 1);
4292
4293         if forwarded_htlc {
4294                 expect_pending_htlcs_forwardable!(nodes[1]);
4295                 check_added_monitors!(nodes[1], 1);
4296                 let fail_commit = nodes[1].node.get_and_clear_pending_msg_events();
4297                 assert_eq!(fail_commit.len(), 1);
4298                 match fail_commit[0] {
4299                         MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fail_htlcs, ref commitment_signed, .. }, .. } => {
4300                                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
4301                                 commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, true, true);
4302                         },
4303                         _ => unreachable!(),
4304                 }
4305                 expect_payment_failed!(nodes[0], second_payment_hash, false);
4306                 expect_payment_failure_chan_update!(nodes[0], chan_2.0.contents.short_channel_id, false);
4307         } else {
4308                 expect_payment_failed!(nodes[1], second_payment_hash, true);
4309         }
4310 }
4311
4312 #[test]
4313 fn test_holding_cell_htlc_add_timeouts() {
4314         do_test_holding_cell_htlc_add_timeouts(false);
4315         do_test_holding_cell_htlc_add_timeouts(true);
4316 }
4317
4318 #[test]
4319 fn test_invalid_channel_announcement() {
4320         // Test BOLT 7 channel_announcement msg requirements on the receiving node, gathering data to build custom channel_announcement msgs
4321         let secp_ctx = Secp256k1::new();
4322         let chanmon_cfgs = create_chanmon_cfgs(2);
4323         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4324         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4325         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4326
4327         let chan_announcement = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
4328
4329         let a_channel_lock = nodes[0].node.channel_state.lock().unwrap();
4330         let b_channel_lock = nodes[1].node.channel_state.lock().unwrap();
4331         let as_chan = a_channel_lock.by_id.get(&chan_announcement.3).unwrap();
4332         let bs_chan = b_channel_lock.by_id.get(&chan_announcement.3).unwrap();
4333
4334         nodes[0].net_graph_msg_handler.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } );
4335
4336         let as_bitcoin_key = as_chan.get_signer().inner.holder_channel_pubkeys.funding_pubkey;
4337         let bs_bitcoin_key = bs_chan.get_signer().inner.holder_channel_pubkeys.funding_pubkey;
4338
4339         let as_network_key = nodes[0].node.get_our_node_id();
4340         let bs_network_key = nodes[1].node.get_our_node_id();
4341
4342         let were_node_one = as_bitcoin_key.serialize()[..] < bs_bitcoin_key.serialize()[..];
4343
4344         let mut chan_announcement;
4345
4346         macro_rules! dummy_unsigned_msg {
4347                 () => {
4348                         msgs::UnsignedChannelAnnouncement {
4349                                 features: ChannelFeatures::known(),
4350                                 chain_hash: genesis_block(Network::Testnet).header.block_hash(),
4351                                 short_channel_id: as_chan.get_short_channel_id().unwrap(),
4352                                 node_id_1: if were_node_one { as_network_key } else { bs_network_key },
4353                                 node_id_2: if were_node_one { bs_network_key } else { as_network_key },
4354                                 bitcoin_key_1: if were_node_one { as_bitcoin_key } else { bs_bitcoin_key },
4355                                 bitcoin_key_2: if were_node_one { bs_bitcoin_key } else { as_bitcoin_key },
4356                                 excess_data: Vec::new(),
4357                         };
4358                 }
4359         }
4360
4361         macro_rules! sign_msg {
4362                 ($unsigned_msg: expr) => {
4363                         let msghash = Message::from_slice(&Sha256dHash::hash(&$unsigned_msg.encode()[..])[..]).unwrap();
4364                         let as_bitcoin_sig = secp_ctx.sign(&msghash, &as_chan.get_signer().inner.funding_key);
4365                         let bs_bitcoin_sig = secp_ctx.sign(&msghash, &bs_chan.get_signer().inner.funding_key);
4366                         let as_node_sig = secp_ctx.sign(&msghash, &nodes[0].keys_manager.get_node_secret());
4367                         let bs_node_sig = secp_ctx.sign(&msghash, &nodes[1].keys_manager.get_node_secret());
4368                         chan_announcement = msgs::ChannelAnnouncement {
4369                                 node_signature_1 : if were_node_one { as_node_sig } else { bs_node_sig},
4370                                 node_signature_2 : if were_node_one { bs_node_sig } else { as_node_sig},
4371                                 bitcoin_signature_1: if were_node_one { as_bitcoin_sig } else { bs_bitcoin_sig },
4372                                 bitcoin_signature_2 : if were_node_one { bs_bitcoin_sig } else { as_bitcoin_sig },
4373                                 contents: $unsigned_msg
4374                         }
4375                 }
4376         }
4377
4378         let unsigned_msg = dummy_unsigned_msg!();
4379         sign_msg!(unsigned_msg);
4380         assert_eq!(nodes[0].net_graph_msg_handler.handle_channel_announcement(&chan_announcement).unwrap(), true);
4381         let _ = nodes[0].net_graph_msg_handler.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } );
4382
4383         // Configured with Network::Testnet
4384         let mut unsigned_msg = dummy_unsigned_msg!();
4385         unsigned_msg.chain_hash = genesis_block(Network::Bitcoin).header.block_hash();
4386         sign_msg!(unsigned_msg);
4387         assert!(nodes[0].net_graph_msg_handler.handle_channel_announcement(&chan_announcement).is_err());
4388
4389         let mut unsigned_msg = dummy_unsigned_msg!();
4390         unsigned_msg.chain_hash = BlockHash::hash(&[1,2,3,4,5,6,7,8,9]);
4391         sign_msg!(unsigned_msg);
4392         assert!(nodes[0].net_graph_msg_handler.handle_channel_announcement(&chan_announcement).is_err());
4393 }
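
// A small sketch of the chain_hash requirement the test above exercises: a node configured for
// one network must reject channel_announcements whose chain_hash names a different chain's
// genesis block (BOLT 7). The accepts helper is an illustrative stand-in, not the actual
// NetGraphMsgHandler validation, which also verifies the signatures.
#[test]
fn toy_chain_hash_check_sketch() {
        fn accepts(our_genesis: BlockHash, announcement_chain_hash: BlockHash) -> bool {
                our_genesis == announcement_chain_hash
        }

        let testnet_genesis = genesis_block(Network::Testnet).header.block_hash();
        let mainnet_genesis = genesis_block(Network::Bitcoin).header.block_hash();

        // An announcement for our own chain is acceptable (signature checks aside)...
        assert!(accepts(testnet_genesis, testnet_genesis));
        // ...but one naming another chain's genesis must be rejected, as asserted above.
        assert!(!accepts(testnet_genesis, mainnet_genesis));
}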
4394
4395 #[test]
4396 fn test_no_txn_manager_serialize_deserialize() {
4397         let chanmon_cfgs = create_chanmon_cfgs(2);
4398         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4399         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4400         let logger: test_utils::TestLogger;
4401         let fee_estimator: test_utils::TestFeeEstimator;
4402         let persister: test_utils::TestPersister;
4403         let new_chain_monitor: test_utils::TestChainMonitor;
4404         let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
4405         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4406
4407         let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, InitFeatures::known(), InitFeatures::known());
4408
4409         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
4410
4411         let nodes_0_serialized = nodes[0].node.encode();
4412         let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
4413         nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
4414
4415         logger = test_utils::TestLogger::new();
4416         fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
4417         persister = test_utils::TestPersister::new();
4418         let keys_manager = &chanmon_cfgs[0].keys_manager;
4419         new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator, &persister, keys_manager);
4420         nodes[0].chain_monitor = &new_chain_monitor;
4421         let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
4422         let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
4423                 &mut chan_0_monitor_read, keys_manager).unwrap();
4424         assert!(chan_0_monitor_read.is_empty());
4425
4426         let mut nodes_0_read = &nodes_0_serialized[..];
4427         let config = UserConfig::default();
4428         let (_, nodes_0_deserialized_tmp) = {
4429                 let mut channel_monitors = HashMap::new();
4430                 channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
4431                 <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
4432                         default_config: config,
4433                         keys_manager,
4434                         fee_estimator: &fee_estimator,
4435                         chain_monitor: nodes[0].chain_monitor,
4436                         tx_broadcaster: nodes[0].tx_broadcaster.clone(),
4437                         logger: &logger,
4438                         channel_monitors,
4439                 }).unwrap()
4440         };
4441         nodes_0_deserialized = nodes_0_deserialized_tmp;
4442         assert!(nodes_0_read.is_empty());
4443
4444         assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
4445         nodes[0].node = &nodes_0_deserialized;
4446         assert_eq!(nodes[0].node.list_channels().len(), 1);
4447         check_added_monitors!(nodes[0], 1);
4448
4449         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
4450         let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
4451         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
4452         let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
4453
4454         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
4455         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4456         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
4457         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4458
4459         let (funding_locked, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
4460         let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked);
4461         for node in nodes.iter() {
4462                 assert!(node.net_graph_msg_handler.handle_channel_announcement(&announcement).unwrap());
4463                 node.net_graph_msg_handler.handle_channel_update(&as_update).unwrap();
4464                 node.net_graph_msg_handler.handle_channel_update(&bs_update).unwrap();
4465         }
4466
4467         send_payment(&nodes[0], &[&nodes[1]], 1000000);
4468 }
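
// The serialize/deserialize pattern above (write the ChannelManager and ChannelMonitor out,
// read them back, and require that the reader be fully consumed) is sketched below in a
// self-contained form. ToyChannelState and its write/read helpers are purely hypothetical
// stand-ins for illustration; they are not the real Writeable/ReadableArgs implementations.
#[test]
fn toy_serialization_round_trip_sketch() {
        #[derive(PartialEq, Debug)]
        struct ToyChannelState { value_sat: u64, is_outbound: bool }

        impl ToyChannelState {
                fn write(&self, w: &mut Vec<u8>) {
                        w.extend_from_slice(&self.value_sat.to_be_bytes());
                        w.push(self.is_outbound as u8);
                }
                fn read(r: &mut &[u8]) -> Option<Self> {
                        if r.len() < 9 { return None; }
                        let (bytes, rest) = r.split_at(9);
                        let mut val = [0u8; 8];
                        val.copy_from_slice(&bytes[..8]);
                        let state = ToyChannelState { value_sat: u64::from_be_bytes(val), is_outbound: bytes[8] == 1 };
                        *r = rest;
                        Some(state)
                }
        }

        let state = ToyChannelState { value_sat: 100_000, is_outbound: true };
        let mut serialized = Vec::new();
        state.write(&mut serialized);

        // As in the tests above, read back from a slice and check it was fully consumed and
        // that the reconstructed state matches what was written out.
        let mut read_slice = &serialized[..];
        let deserialized = ToyChannelState::read(&mut read_slice).unwrap();
        assert!(read_slice.is_empty());
        assert_eq!(deserialized, state);
}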
4469
4470 #[test]
4471 fn test_dup_htlc_onchain_fails_on_reload() {
4472         // When a Channel is closed, any outbound HTLCs which were relayed through it are simply
4473         // dropped when the Channel is. From there, the ChannelManager relies on the ChannelMonitor
4474         // having a copy of the relevant fail-/claim-back data and processes the HTLC fail/claim when
4475         // the ChannelMonitor tells it to.
4476         //
4477         // If, due to an on-chain event, an HTLC is failed/claimed, and then we serialize the
4478         // ChannelManager, we generally expect there not to be a duplicate HTLC fail/claim (eg via a
4479         // PaymentFailed event appearing). However, because we may not serialize the relevant
4480         // ChannelMonitor at the same time, this isn't strictly guaranteed. In order to provide this
4481         // consistency, the ChannelManager explicitly tracks pending-onchain-resolution outbound HTLCs
4482         // and de-duplicates ChannelMonitor events.
4483         //
4484         // This tests that explicit tracking behavior.
4485         let chanmon_cfgs = create_chanmon_cfgs(2);
4486         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4487         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4488         let persister: test_utils::TestPersister;
4489         let new_chain_monitor: test_utils::TestChainMonitor;
4490         let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
4491         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4492
4493         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
4494
4495         // Route a payment, but force-close the channel before the HTLC fulfill message arrives at
4496         // nodes[0].
4497         let (payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1]], 10000000);
4498         nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
4499         check_closed_broadcast!(nodes[0], true);
4500         check_added_monitors!(nodes[0], 1);
4501
4502         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
4503         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
4504
4505         // Connect blocks until the CLTV timeout is up so that we get an HTLC-Timeout transaction
4506         connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
4507         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
4508         assert_eq!(node_txn.len(), 3);
4509         assert_eq!(node_txn[0], node_txn[1]);
4510
4511         assert!(nodes[1].node.claim_funds(payment_preimage));
4512         check_added_monitors!(nodes[1], 1);
4513
4514         let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
4515         connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone(), node_txn[2].clone()]});
4516         check_closed_broadcast!(nodes[1], true);
4517         check_added_monitors!(nodes[1], 1);
4518         let claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
4519
4520         header.prev_blockhash = nodes[0].best_block_hash();
4521         connect_block(&nodes[0], &Block { header, txdata: vec![node_txn[1].clone(), node_txn[2].clone()]});
4522
4523         // Serialize out the ChannelMonitor before connecting the on-chain claim transactions. This is
4524         // fairly normal behavior as ChannelMonitor(s) are often not re-serialized when on-chain events
4525         // happen, unlike ChannelManager which tends to be re-serialized after any relevant event(s).
4526         let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
4527         nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
4528
4529         header.prev_blockhash = nodes[0].best_block_hash();
4530         let claim_block = Block { header, txdata: claim_txn};
4531         connect_block(&nodes[0], &claim_block);
4532         expect_payment_sent!(nodes[0], payment_preimage);
4533
4534         // ChannelManagers generally get re-serialized after any relevant event(s). Since we just
4535         // connected a highly-relevant block, it likely gets serialized out now.
4536         let mut chan_manager_serialized = test_utils::TestVecWriter(Vec::new());
4537         nodes[0].node.write(&mut chan_manager_serialized).unwrap();
4538
4539         // Now reload nodes[0]...
4540         persister = test_utils::TestPersister::new();
4541         let keys_manager = &chanmon_cfgs[0].keys_manager;
4542         new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), nodes[0].logger, node_cfgs[0].fee_estimator, &persister, keys_manager);
4543         nodes[0].chain_monitor = &new_chain_monitor;
4544         let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
4545         let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
4546                 &mut chan_0_monitor_read, keys_manager).unwrap();
4547         assert!(chan_0_monitor_read.is_empty());
4548
4549         let (_, nodes_0_deserialized_tmp) = {
4550                 let mut channel_monitors = HashMap::new();
4551                 channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
4552                 <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>
4553                         ::read(&mut io::Cursor::new(&chan_manager_serialized.0[..]), ChannelManagerReadArgs {
4554                                 default_config: Default::default(),
4555                                 keys_manager,
4556                                 fee_estimator: node_cfgs[0].fee_estimator,
4557                                 chain_monitor: nodes[0].chain_monitor,
4558                                 tx_broadcaster: nodes[0].tx_broadcaster.clone(),
4559                                 logger: nodes[0].logger,
4560                                 channel_monitors,
4561                         }).unwrap()
4562         };
4563         nodes_0_deserialized = nodes_0_deserialized_tmp;
4564
4565         assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
4566         check_added_monitors!(nodes[0], 1);
4567         nodes[0].node = &nodes_0_deserialized;
4568
4569         // Note that if we re-connect the block which exposed nodes[0] to the payment preimage (but
4570         // which the current ChannelMonitor has not seen), the ChannelManager's de-duplication of
4571         // payment events should kick in, leaving us with no pending events here.
4572         let height = nodes[0].blocks.lock().unwrap().len() as u32 - 1;
4573         nodes[0].chain_monitor.chain_monitor.block_connected(&claim_block, height);
4574         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
4575 }
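
// A simplified, self-contained illustration of the de-duplication behaviour the test above
// exercises: if the same on-chain claim is reported twice (say once before and once after a
// reload), only the first report should surface a payment event. ToyEventTracker is a
// hypothetical stand-in for illustration only, not the real ChannelManager bookkeeping.
#[test]
fn toy_onchain_claim_dedup_sketch() {
        struct ToyEventTracker { seen_claims: Vec<[u8; 32]>, events: Vec<[u8; 32]> }

        impl ToyEventTracker {
                fn claim_reported(&mut self, payment_hash: [u8; 32]) {
                        // Only generate a user-facing event the first time a given payment hash is
                        // reported as claimed on-chain.
                        if !self.seen_claims.contains(&payment_hash) {
                                self.seen_claims.push(payment_hash);
                                self.events.push(payment_hash);
                        }
                }
        }

        let mut tracker = ToyEventTracker { seen_claims: Vec::new(), events: Vec::new() };
        let payment_hash = [42u8; 32];

        // The first report (pre-"reload") generates an event...
        tracker.claim_reported(payment_hash);
        assert_eq!(tracker.events.len(), 1);
        // ...while re-reporting the same claim (post-"reload") does not generate a duplicate.
        tracker.claim_reported(payment_hash);
        assert_eq!(tracker.events.len(), 1);
}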
4576
4577 #[test]
4578 fn test_manager_serialize_deserialize_events() {
4579         // This test makes sure the events field in ChannelManager survives de/serialization
4580         let chanmon_cfgs = create_chanmon_cfgs(2);
4581         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4582         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4583         let fee_estimator: test_utils::TestFeeEstimator;
4584         let persister: test_utils::TestPersister;
4585         let logger: test_utils::TestLogger;
4586         let new_chain_monitor: test_utils::TestChainMonitor;
4587         let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
4588         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4589
4590         // Start creating a channel, but stop right before broadcasting the funding transaction
4591         let channel_value = 100000;
4592         let push_msat = 10001;
4593         let a_flags = InitFeatures::known();
4594         let b_flags = InitFeatures::known();
4595         let node_a = nodes.remove(0);
4596         let node_b = nodes.remove(0);
4597         node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42, None).unwrap();
4598         node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), a_flags, &get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id()));
4599         node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), b_flags, &get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a.node.get_our_node_id()));
4600
4601         let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&node_a, channel_value, 42);
4602
4603         node_a.node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
4604         check_added_monitors!(node_a, 0);
4605
4606         node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id()));
4607         {
4608                 let mut added_monitors = node_b.chain_monitor.added_monitors.lock().unwrap();
4609                 assert_eq!(added_monitors.len(), 1);
4610                 assert_eq!(added_monitors[0].0, funding_output);
4611                 added_monitors.clear();
4612         }
4613
4614         node_a.node.handle_funding_signed(&node_b.node.get_our_node_id(), &get_event_msg!(node_b, MessageSendEvent::SendFundingSigned, node_a.node.get_our_node_id()));
4615         {
4616                 let mut added_monitors = node_a.chain_monitor.added_monitors.lock().unwrap();
4617                 assert_eq!(added_monitors.len(), 1);
4618                 assert_eq!(added_monitors[0].0, funding_output);
4619                 added_monitors.clear();
4620         }
4621         // Normally, this is where node_a would broadcast the funding transaction, but the test de/serializes first instead
4622
4623         nodes.push(node_a);
4624         nodes.push(node_b);
4625
4626         // Start the de/serialization process mid-channel creation to check that the channel manager holds onto events across serialization
4627         let nodes_0_serialized = nodes[0].node.encode();
4628         let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
4629         nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
4630
4631         fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
4632         logger = test_utils::TestLogger::new();
4633         persister = test_utils::TestPersister::new();
4634         let keys_manager = &chanmon_cfgs[0].keys_manager;
4635         new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator, &persister, keys_manager);
4636         nodes[0].chain_monitor = &new_chain_monitor;
4637         let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
4638         let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
4639                 &mut chan_0_monitor_read, keys_manager).unwrap();
4640         assert!(chan_0_monitor_read.is_empty());
4641
4642         let mut nodes_0_read = &nodes_0_serialized[..];
4643         let config = UserConfig::default();
4644         let (_, nodes_0_deserialized_tmp) = {
4645                 let mut channel_monitors = HashMap::new();
4646                 channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
4647                 <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
4648                         default_config: config,
4649                         keys_manager,
4650                         fee_estimator: &fee_estimator,
4651                         chain_monitor: nodes[0].chain_monitor,
4652                         tx_broadcaster: nodes[0].tx_broadcaster.clone(),
4653                         logger: &logger,
4654                         channel_monitors,
4655                 }).unwrap()
4656         };
4657         nodes_0_deserialized = nodes_0_deserialized_tmp;
4658         assert!(nodes_0_read.is_empty());
4659
4660         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
4661
4662         assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
4663         nodes[0].node = &nodes_0_deserialized;
4664
4665         // After deserializing, make sure the funding_transaction is still held by the channel manager
4666         let events_4 = nodes[0].node.get_and_clear_pending_events();
4667         assert_eq!(events_4.len(), 0);
4668         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
4669         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].txid(), funding_output.txid);
4670
4671         // Make sure the channel is functioning as though the de/serialization never happened
4672         assert_eq!(nodes[0].node.list_channels().len(), 1);
4673         check_added_monitors!(nodes[0], 1);
4674
4675         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
4676         let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
4677         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
4678         let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
4679
4680         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
4681         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
4682         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
4683         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
4684
4685         let (funding_locked, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
4686         let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked);
4687         for node in nodes.iter() {
4688                 assert!(node.net_graph_msg_handler.handle_channel_announcement(&announcement).unwrap());
4689                 node.net_graph_msg_handler.handle_channel_update(&as_update).unwrap();
4690                 node.net_graph_msg_handler.handle_channel_update(&bs_update).unwrap();
4691         }
4692
4693         send_payment(&nodes[0], &[&nodes[1]], 1000000);
4694 }
4695
4696 #[test]
4697 fn test_simple_manager_serialize_deserialize() {
4698         let chanmon_cfgs = create_chanmon_cfgs(2);
4699         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4700         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4701         let logger: test_utils::TestLogger;
4702         let fee_estimator: test_utils::TestFeeEstimator;
4703         let persister: test_utils::TestPersister;
4704         let new_chain_monitor: test_utils::TestChainMonitor;
4705         let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
4706         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4707         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
4708
4709         let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
4710         let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
4711
4712         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
4713
4714         let nodes_0_serialized = nodes[0].node.encode();
4715         let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
4716         nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
4717
4718         logger = test_utils::TestLogger::new();
4719         fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
4720         persister = test_utils::TestPersister::new();
4721         let keys_manager = &chanmon_cfgs[0].keys_manager;
4722         new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator, &persister, keys_manager);
4723         nodes[0].chain_monitor = &new_chain_monitor;
4724         let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
4725         let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
4726                 &mut chan_0_monitor_read, keys_manager).unwrap();
4727         assert!(chan_0_monitor_read.is_empty());
4728
4729         let mut nodes_0_read = &nodes_0_serialized[..];
4730         let (_, nodes_0_deserialized_tmp) = {
4731                 let mut channel_monitors = HashMap::new();
4732                 channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
4733                 <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
4734                         default_config: UserConfig::default(),
4735                         keys_manager,
4736                         fee_estimator: &fee_estimator,
4737                         chain_monitor: nodes[0].chain_monitor,
4738                         tx_broadcaster: nodes[0].tx_broadcaster.clone(),
4739                         logger: &logger,
4740                         channel_monitors,
4741                 }).unwrap()
4742         };
4743         nodes_0_deserialized = nodes_0_deserialized_tmp;
4744         assert!(nodes_0_read.is_empty());
4745
4746         assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
4747         nodes[0].node = &nodes_0_deserialized;
4748         check_added_monitors!(nodes[0], 1);
4749
4750         reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
4751
4752         fail_payment(&nodes[0], &[&nodes[1]], our_payment_hash);
4753         claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
4754 }
4755
4756 #[test]
4757 fn test_manager_serialize_deserialize_inconsistent_monitor() {
4758         // Test deserializing a ChannelManager with an out-of-date ChannelMonitor
4759         let chanmon_cfgs = create_chanmon_cfgs(4);
4760         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
4761         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
4762         let logger: test_utils::TestLogger;
4763         let fee_estimator: test_utils::TestFeeEstimator;
4764         let persister: test_utils::TestPersister;
4765         let new_chain_monitor: test_utils::TestChainMonitor;
4766         let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
4767         let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
4768         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
4769         create_announced_chan_between_nodes(&nodes, 2, 0, InitFeatures::known(), InitFeatures::known());
4770         let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 3, InitFeatures::known(), InitFeatures::known());
4771
4772         let mut node_0_stale_monitors_serialized = Vec::new();
4773         for monitor in nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter() {
4774                 let mut writer = test_utils::TestVecWriter(Vec::new());
4775                 monitor.1.write(&mut writer).unwrap();
4776                 node_0_stale_monitors_serialized.push(writer.0);
4777         }
4778
4779         let (our_payment_preimage, _, _) = route_payment(&nodes[2], &[&nodes[0], &nodes[1]], 1000000);
4780
4781         // Serialize the ChannelManager here; the monitors we serialize below are kept up-to-date
4782         let nodes_0_serialized = nodes[0].node.encode();
4783
4784         route_payment(&nodes[0], &[&nodes[3]], 1000000);
4785         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
4786         nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
4787         nodes[3].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
4788
4789         // Now serialize the ChannelMonitors (which are now out-of-sync with the ChannelManager for
4790         // the channel w/ nodes[3])
4791         let mut node_0_monitors_serialized = Vec::new();
4792         for monitor in nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter() {
4793                 let mut writer = test_utils::TestVecWriter(Vec::new());
4794                 monitor.1.write(&mut writer).unwrap();
4795                 node_0_monitors_serialized.push(writer.0);
4796         }
4797
4798         logger = test_utils::TestLogger::new();
4799         fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
4800         persister = test_utils::TestPersister::new();
4801         let keys_manager = &chanmon_cfgs[0].keys_manager;
4802         new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator, &persister, keys_manager);
4803         nodes[0].chain_monitor = &new_chain_monitor;
4804
4805
4806         let mut node_0_stale_monitors = Vec::new();
4807         for serialized in node_0_stale_monitors_serialized.iter() {
4808                 let mut read = &serialized[..];
4809                 let (_, monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut read, keys_manager).unwrap();
4810                 assert!(read.is_empty());
4811                 node_0_stale_monitors.push(monitor);
4812         }
4813
4814         let mut node_0_monitors = Vec::new();
4815         for serialized in node_0_monitors_serialized.iter() {
4816                 let mut read = &serialized[..];
4817                 let (_, monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut read, keys_manager).unwrap();
4818                 assert!(read.is_empty());
4819                 node_0_monitors.push(monitor);
4820         }
4821
4822         let mut nodes_0_read = &nodes_0_serialized[..];
4823         if let Err(msgs::DecodeError::InvalidValue) =
4824                 <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
4825                 default_config: UserConfig::default(),
4826                 keys_manager,
4827                 fee_estimator: &fee_estimator,
4828                 chain_monitor: nodes[0].chain_monitor,
4829                 tx_broadcaster: nodes[0].tx_broadcaster.clone(),
4830                 logger: &logger,
4831                 channel_monitors: node_0_stale_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
4832         }) { } else {
4833                 panic!("Deserializing the ChannelManager with stale monitor(s) must return an Err; getting Ok here indicates a bug");
4834         };
4835
4836         let mut nodes_0_read = &nodes_0_serialized[..];
4837         let (_, nodes_0_deserialized_tmp) =
4838                 <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
4839                 default_config: UserConfig::default(),
4840                 keys_manager,
4841                 fee_estimator: &fee_estimator,
4842                 chain_monitor: nodes[0].chain_monitor,
4843                 tx_broadcaster: nodes[0].tx_broadcaster.clone(),
4844                 logger: &logger,
4845                 channel_monitors: node_0_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
4846         }).unwrap();
4847         nodes_0_deserialized = nodes_0_deserialized_tmp;
4848         assert!(nodes_0_read.is_empty());
4849
4850         { // Channel close should result in a commitment tx
4851                 let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
4852                 assert_eq!(txn.len(), 1);
4853                 check_spends!(txn[0], funding_tx);
4854                 assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid());
4855         }
4856
4857         for monitor in node_0_monitors.drain(..) {
4858                 assert!(nodes[0].chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor).is_ok());
4859                 check_added_monitors!(nodes[0], 1);
4860         }
4861         nodes[0].node = &nodes_0_deserialized;
4862
4863         // nodes[1] and nodes[2] have no lost state with nodes[0]...
4864         reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
4865         reconnect_nodes(&nodes[0], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
4866         //... and we can even still claim the payment!
4867         claim_payment(&nodes[2], &[&nodes[0], &nodes[1]], our_payment_preimage);
4868
4869         nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
4870         let reestablish = get_event_msg!(nodes[3], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
4871         nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
4872         nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish);
4873         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
4874         assert_eq!(msg_events.len(), 1);
4875         if let MessageSendEvent::HandleError { ref action, .. } = msg_events[0] {
4876                 match action {
4877                         &ErrorAction::SendErrorMessage { ref msg } => {
4878                                 assert_eq!(msg.channel_id, channel_id);
4879                         },
4880                         _ => panic!("Unexpected event!"),
4881                 }
4882         }
4883 }
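
// A minimal conceptual sketch of the staleness check exercised above: if the manager's view of
// a channel is ahead of the monitor it is handed at load time, deserialization must fail rather
// than silently lose state. The load helper and its update-counter comparison are assumptions
// for illustration only, not the real ChannelManagerReadArgs logic.
#[test]
fn toy_stale_monitor_rejection_sketch() {
        fn load(manager_update_id: u64, monitor_update_id: u64) -> Result<(), &'static str> {
                if monitor_update_id < manager_update_id {
                        // Stale monitor: refuse to load rather than risk losing HTLC state.
                        return Err("stale ChannelMonitor");
                }
                Ok(())
        }

        // A monitor kept up-to-date with (or ahead of) the manager loads fine...
        assert!(load(5, 5).is_ok());
        assert!(load(5, 7).is_ok());
        // ...while a monitor behind the serialized manager must be rejected, as in the test above.
        assert!(load(7, 5).is_err());
}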
4884
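// Drains any pending SpendableOutputs events from the given node's ChainMonitor and uses the
// provided KeysInterface to build sweep transactions for them: one transaction per output
// (paying to a dummy OP_RETURN script at a fixed test feerate), plus, if more than one output
// was returned, a single transaction aggregating all of them. Returns the resulting
// transactions so callers can check what they spend.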
4885 macro_rules! check_spendable_outputs {
4886         ($node: expr, $keysinterface: expr) => {
4887                 {
4888                         let mut events = $node.chain_monitor.chain_monitor.get_and_clear_pending_events();
4889                         let mut txn = Vec::new();
4890                         let mut all_outputs = Vec::new();
4891                         let secp_ctx = Secp256k1::new();
4892                         for event in events.drain(..) {
4893                                 match event {
4894                                         Event::SpendableOutputs { mut outputs } => {
4895                                                 for outp in outputs.drain(..) {
4896                                                         txn.push($keysinterface.backing.spend_spendable_outputs(&[&outp], Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &secp_ctx).unwrap());
4897                                                         all_outputs.push(outp);
4898                                                 }
4899                                         },
4900                                         _ => panic!("Unexpected event"),
4901                                 };
4902                         }
4903                         if all_outputs.len() > 1 {
4904                                 if let Ok(tx) = $keysinterface.backing.spend_spendable_outputs(&all_outputs.iter().map(|a| a).collect::<Vec<_>>(), Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &secp_ctx) {
4905                                         txn.push(tx);
4906                                 }
4907                         }
4908                         txn
4909                 }
4910         }
4911 }
4912
4913 #[test]
4914 fn test_claim_sizeable_push_msat() {
4915         // Incidentally, this tests SpendableOutputs event generation from detection of the to_local output on the commitment tx
4916         let chanmon_cfgs = create_chanmon_cfgs(2);
4917         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4918         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4919         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4920
4921         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, InitFeatures::known(), InitFeatures::known());
4922         nodes[1].node.force_close_channel(&chan.2).unwrap();
4923         check_closed_broadcast!(nodes[1], true);
4924         check_added_monitors!(nodes[1], 1);
4925         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
4926         assert_eq!(node_txn.len(), 1);
4927         check_spends!(node_txn[0], chan.3);
4928         assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of the to_remote output, as channel_reserve_satoshis blocks us from doing so at channel opening
4929
4930         mine_transaction(&nodes[1], &node_txn[0]);
4931         connect_blocks(&nodes[1], BREAKDOWN_TIMEOUT as u32 - 1);
4932
4933         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4934         assert_eq!(spend_txn.len(), 1);
4935         assert_eq!(spend_txn[0].input.len(), 1);
4936         check_spends!(spend_txn[0], node_txn[0]);
4937         assert_eq!(spend_txn[0].input[0].sequence, BREAKDOWN_TIMEOUT as u32);
4938 }
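
// A small sketch of the relative-timelock behaviour asserted above: a to_local output can only
// be swept once its CSV delay (BREAKDOWN_TIMEOUT here) has elapsed, and the sweep input must
// commit to at least that delay in its nSequence. The spendable helper is an illustrative
// simplification of BIP 68/OP_CSV semantics (it ignores the sequence type and disable flags),
// not the actual ChannelMonitor logic.
#[test]
fn toy_csv_delay_sketch() {
        fn spendable(confirmations: u32, csv_delay: u32, input_sequence: u32) -> bool {
                // OP_CHECKSEQUENCEVERIFY requires the input's nSequence to be at least the script's
                // delay, and BIP 68 requires at least nSequence confirmations of the output being spent.
                input_sequence >= csv_delay && confirmations >= input_sequence
        }

        let csv_delay = BREAKDOWN_TIMEOUT as u32;
        // One block short of the delay the output is not yet sweepable...
        assert!(!spendable(csv_delay - 1, csv_delay, csv_delay));
        // ...and once the delay has fully elapsed it is, matching the block count connected above.
        assert!(spendable(csv_delay, csv_delay, csv_delay));
}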
4939
4940 #[test]
4941 fn test_claim_on_remote_sizeable_push_msat() {
4942         // Same test as the previous one, but on the remote commitment tx, as per_commitment_point registration changes depending on whether you're the funder or fundee and the
4943         // to_remote output is encumbered by a P2WPKH
4944         let chanmon_cfgs = create_chanmon_cfgs(2);
4945         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4946         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4947         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4948
4949         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, InitFeatures::known(), InitFeatures::known());
4950         nodes[0].node.force_close_channel(&chan.2).unwrap();
4951         check_closed_broadcast!(nodes[0], true);
4952         check_added_monitors!(nodes[0], 1);
4953
4954         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
4955         assert_eq!(node_txn.len(), 1);
4956         check_spends!(node_txn[0], chan.3);
4957         assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of the to_remote output, as channel_reserve_satoshis blocks us from doing so at channel opening
4958
4959         mine_transaction(&nodes[1], &node_txn[0]);
4960         check_closed_broadcast!(nodes[1], true);
4961         check_added_monitors!(nodes[1], 1);
4962         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4963
4964         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4965         assert_eq!(spend_txn.len(), 1);
4966         check_spends!(spend_txn[0], node_txn[0]);
4967 }
4968
4969 #[test]
4970 fn test_claim_on_remote_revoked_sizeable_push_msat() {
4971         // Same test as the previous one, but on the remote revoked commitment tx, as per_commitment_point registration changes depending on whether you're the funder or fundee and the
4972         // to_remote output is encumbered by a P2WPKH
4973
4974         let chanmon_cfgs = create_chanmon_cfgs(2);
4975         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
4976         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
4977         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
4978
4979         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 59000000, InitFeatures::known(), InitFeatures::known());
4980         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
4981         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan.2);
4982         assert_eq!(revoked_local_txn[0].input.len(), 1);
4983         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
4984
4985         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
4986         mine_transaction(&nodes[1], &revoked_local_txn[0]);
4987         check_closed_broadcast!(nodes[1], true);
4988         check_added_monitors!(nodes[1], 1);
4989
4990         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
4991         mine_transaction(&nodes[1], &node_txn[0]);
4992         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
4993
4994         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
4995         assert_eq!(spend_txn.len(), 3);
4996         check_spends!(spend_txn[0], revoked_local_txn[0]); // to_remote output on revoked remote commitment_tx
4997         check_spends!(spend_txn[1], node_txn[0]);
4998         check_spends!(spend_txn[2], revoked_local_txn[0], node_txn[0]); // Both outputs
4999 }
5000
5001 #[test]
5002 fn test_static_spendable_outputs_preimage_tx() {
5003         let chanmon_cfgs = create_chanmon_cfgs(2);
5004         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5005         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5006         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5007
5008         // Create some initial channels
5009         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
5010
5011         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
5012
5013         let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
5014         assert_eq!(commitment_tx[0].input.len(), 1);
5015         assert_eq!(commitment_tx[0].input[0].previous_output.txid, chan_1.3.txid());
5016
5017         // Settle A's commitment tx on B's chain
5018         assert!(nodes[1].node.claim_funds(payment_preimage));
5019         check_added_monitors!(nodes[1], 1);
5020         mine_transaction(&nodes[1], &commitment_tx[0]);
5021         check_added_monitors!(nodes[1], 1);
5022         let events = nodes[1].node.get_and_clear_pending_msg_events();
5023         match events[0] {
5024                 MessageSendEvent::UpdateHTLCs { .. } => {},
5025                 _ => panic!("Unexpected event"),
5026         }
5027         match events[1] {
5028                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
5029                 _ => panic!("Unexpected event"),
5030         }
5031
5032         // Check B's monitor was able to send back output descriptor event for preimage tx on A's commitment tx
5033         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); // ChannelManager : 2 (local commitment tx + HTLC-Success), ChannelMonitor: preimage tx
5034         assert_eq!(node_txn.len(), 3);
5035         check_spends!(node_txn[0], commitment_tx[0]);
5036         assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
5037         check_spends!(node_txn[1], chan_1.3);
5038         check_spends!(node_txn[2], node_txn[1]);
5039
5040         mine_transaction(&nodes[1], &node_txn[0]);
5041         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
5042
5043         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
5044         assert_eq!(spend_txn.len(), 1);
5045         check_spends!(spend_txn[0], node_txn[0]);
5046 }
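
// A short sketch of the preimage relationship the claim above relies on: an HTLC can be claimed
// exactly when the claimer knows a preimage whose SHA-256 matches the payment hash committed to
// in the HTLC output. This reuses the PaymentPreimage/PaymentHash/Sha256 types already imported
// here, but the check itself is a standalone illustration rather than any on-chain validation.
#[test]
fn toy_preimage_claim_sketch() {
        let payment_preimage = PaymentPreimage([7; 32]);
        let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());

        // Knowing the preimage lets us reproduce the committed hash...
        assert_eq!(Sha256::hash(&payment_preimage.0[..]).into_inner(), payment_hash.0);
        // ...while a different "preimage" does not satisfy the HTLC.
        assert_ne!(Sha256::hash(&[8u8; 32][..]).into_inner(), payment_hash.0);
}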
5047
5048 #[test]
5049 fn test_static_spendable_outputs_timeout_tx() {
5050         let chanmon_cfgs = create_chanmon_cfgs(2);
5051         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5052         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5053         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5054
5055         // Create some initial channels
5056         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
5057
5058         // Rebalance the network a bit by relaying one payment through the channel
5059         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
5060
5061         let (_, our_payment_hash, _) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000);
5062
5063         let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
5064         assert_eq!(commitment_tx[0].input.len(), 1);
5065         assert_eq!(commitment_tx[0].input[0].previous_output.txid, chan_1.3.txid());
5066
5067         // Settle A's commitment tx on B's chain
5068         mine_transaction(&nodes[1], &commitment_tx[0]);
5069         check_added_monitors!(nodes[1], 1);
5070         let events = nodes[1].node.get_and_clear_pending_msg_events();
5071         match events[0] {
5072                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
5073                 _ => panic!("Unexpected event"),
5074         }
5075         connect_blocks(&nodes[1], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
5076
5077         // Check B's monitor was able to send back output descriptor event for timeout tx on A's commitment tx
5078         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
5079         assert_eq!(node_txn.len(), 2); // ChannelManager : 1 local commitment tx, ChannelMonitor: timeout tx
5080         check_spends!(node_txn[0], chan_1.3.clone());
5081         check_spends!(node_txn[1],  commitment_tx[0].clone());
5082         assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5083
5084         mine_transaction(&nodes[1], &node_txn[1]);
5085         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
5086         expect_payment_failed!(nodes[1], our_payment_hash, true);
5087
5088         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
5089         assert_eq!(spend_txn.len(), 3); // SpendableOutputs: remote_commitment_tx.to_remote, timeout_tx.output, and one tx spending both
5090         check_spends!(spend_txn[0], commitment_tx[0]);
5091         check_spends!(spend_txn[1], node_txn[1]);
5092         check_spends!(spend_txn[2], node_txn[1], commitment_tx[0]); // All outputs
5093 }
5094
5095 #[test]
5096 fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
5097         let chanmon_cfgs = create_chanmon_cfgs(2);
5098         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5099         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5100         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5101
5102         // Create some initial channels
5103         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
5104
5105         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
5106         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
5107         assert_eq!(revoked_local_txn[0].input.len(), 1);
5108         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
5109
5110         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
5111
5112         mine_transaction(&nodes[1], &revoked_local_txn[0]);
5113         check_closed_broadcast!(nodes[1], true);
5114         check_added_monitors!(nodes[1], 1);
5115
5116         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
5117         assert_eq!(node_txn.len(), 2);
5118         assert_eq!(node_txn[0].input.len(), 2);
5119         check_spends!(node_txn[0], revoked_local_txn[0]);
5120
5121         mine_transaction(&nodes[1], &node_txn[0]);
5122         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
5123
5124         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
5125         assert_eq!(spend_txn.len(), 1);
5126         check_spends!(spend_txn[0], node_txn[0]);
5127 }
5128
5129 #[test]
5130 fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
5131         let mut chanmon_cfgs = create_chanmon_cfgs(2);
5132         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
5133         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5134         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5135         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5136
5137         // Create some initial channels
5138         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
5139
5140         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
5141         let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
5142         assert_eq!(revoked_local_txn[0].input.len(), 1);
5143         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
5144
5145         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
5146
5147         // A will generate HTLC-Timeout from revoked commitment tx
5148         mine_transaction(&nodes[0], &revoked_local_txn[0]);
5149         check_closed_broadcast!(nodes[0], true);
5150         check_added_monitors!(nodes[0], 1);
5151         connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
5152
5153         let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
5154         assert_eq!(revoked_htlc_txn.len(), 2);
5155         check_spends!(revoked_htlc_txn[0], chan_1.3);
5156         assert_eq!(revoked_htlc_txn[1].input.len(), 1);
5157         assert_eq!(revoked_htlc_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
5158         check_spends!(revoked_htlc_txn[1], revoked_local_txn[0]);
5159         assert_ne!(revoked_htlc_txn[1].lock_time, 0); // HTLC-Timeout
5160
5161         // B will generate justice tx from A's revoked commitment/HTLC tx
5162         let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
5163         connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[1].clone()] });
5164         check_closed_broadcast!(nodes[1], true);
5165         check_added_monitors!(nodes[1], 1);
5166
5167         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
5168         assert_eq!(node_txn.len(), 3); // ChannelMonitor: bogus justice tx, justice tx on revoked outputs, ChannelManager: local commitment tx
5169         // The first transaction generated is bogus - it spends both outputs of revoked_local_txn[0]
5170         // including the one already spent by revoked_htlc_txn[1]. That's OK, we'll spend with valid
5171         // transactions next...
5172         assert_eq!(node_txn[0].input.len(), 3);
5173         check_spends!(node_txn[0], revoked_local_txn[0], revoked_htlc_txn[1]);
5174
5175         assert_eq!(node_txn[1].input.len(), 2);
5176         check_spends!(node_txn[1], revoked_local_txn[0], revoked_htlc_txn[1]);
5177         if node_txn[1].input[1].previous_output.txid == revoked_htlc_txn[1].txid() {
5178                 assert_ne!(node_txn[1].input[0].previous_output, revoked_htlc_txn[1].input[0].previous_output);
5179         } else {
5180                 assert_eq!(node_txn[1].input[0].previous_output.txid, revoked_htlc_txn[1].txid());
5181                 assert_ne!(node_txn[1].input[1].previous_output, revoked_htlc_txn[1].input[0].previous_output);
5182         }
5183
5184         assert_eq!(node_txn[2].input.len(), 1);
5185         check_spends!(node_txn[2], chan_1.3);
5186
5187         mine_transaction(&nodes[1], &node_txn[1]);
5188         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
5189
5190         // Check B's ChannelMonitor was able to generate the right spendable output descriptor
5191         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
5192         assert_eq!(spend_txn.len(), 1);
5193         assert_eq!(spend_txn[0].input.len(), 1);
5194         check_spends!(spend_txn[0], node_txn[1]);
5195 }
5196
5197 #[test]
5198 fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
5199         let mut chanmon_cfgs = create_chanmon_cfgs(2);
5200         chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
5201         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5202         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5203         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5204
5205         // Create some initial channels
5206         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
5207
5208         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
5209         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
5210         assert_eq!(revoked_local_txn[0].input.len(), 1);
5211         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
5212
5213         // The to-be-revoked commitment tx should have one HTLC and one to_remote output
5214         assert_eq!(revoked_local_txn[0].output.len(), 2);
5215
5216         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
5217
5218         // B will generate HTLC-Success from revoked commitment tx
5219         mine_transaction(&nodes[1], &revoked_local_txn[0]);
5220         check_closed_broadcast!(nodes[1], true);
5221         check_added_monitors!(nodes[1], 1);
5222         let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
5223
5224         assert_eq!(revoked_htlc_txn.len(), 2);
5225         assert_eq!(revoked_htlc_txn[0].input.len(), 1);
5226         assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5227         check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]);
5228
5229         // Check that the unspent output (of the two) on revoked_local_txn[0] is a P2WPKH:
5230         let unspent_local_txn_output = revoked_htlc_txn[0].input[0].previous_output.vout as usize ^ 1;
5231         assert_eq!(revoked_local_txn[0].output[unspent_local_txn_output].script_pubkey.len(), 2 + 20); // P2WPKH
5232
5233         // A will generate justice tx from B's revoked commitment/HTLC tx
5234         let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
5235         connect_block(&nodes[0], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] });
5236         check_closed_broadcast!(nodes[0], true);
5237         check_added_monitors!(nodes[0], 1);
5238
5239         let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
5240         assert_eq!(node_txn.len(), 3); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-success, ChannelManager: local commitment tx
5241
5242         // The first transaction generated is bogus - it spends both outputs of revoked_local_txn[0]
5243         // including the one already spent by revoked_htlc_txn[0]. That's OK, we'll spend with valid
5244         // transactions next...
5245         assert_eq!(node_txn[0].input.len(), 2);
5246         check_spends!(node_txn[0], revoked_local_txn[0], revoked_htlc_txn[0]);
5247         if node_txn[0].input[1].previous_output.txid == revoked_htlc_txn[0].txid() {
5248                 assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
5249         } else {
5250                 assert_eq!(node_txn[0].input[0].previous_output.txid, revoked_htlc_txn[0].txid());
5251                 assert_eq!(node_txn[0].input[1].previous_output, revoked_htlc_txn[0].input[0].previous_output);
5252         }
5253
5254         assert_eq!(node_txn[1].input.len(), 1);
5255         check_spends!(node_txn[1], revoked_htlc_txn[0]);
5256
5257         check_spends!(node_txn[2], chan_1.3);
5258
5259         mine_transaction(&nodes[0], &node_txn[1]);
5260         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
5261
5262         // Note that nodes[0]'s tx_broadcaster is still locked, so if we get here the channelmonitor
5263         // didn't try to generate any new transactions.
5264
5265         // Check A's ChannelMonitor was able to generate the right spendable output descriptor
5266         let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
5267         assert_eq!(spend_txn.len(), 3);
5268         assert_eq!(spend_txn[0].input.len(), 1);
5269         check_spends!(spend_txn[0], revoked_local_txn[0]); // spending to_remote output from revoked local tx
5270         assert_ne!(spend_txn[0].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
5271         check_spends!(spend_txn[1], node_txn[1]); // spending justice tx output on the htlc success tx
5272         check_spends!(spend_txn[2], revoked_local_txn[0], node_txn[1]); // Both outputs
5273 }
5274
5275 #[test]
5276 fn test_onchain_to_onchain_claim() {
5277         // Test that, in case of channel closure, we detect the state of the output and claim the HTLC
5278         // on the downstream peer's remote commitment tx.
5279         // First, have C claim an HTLC against its own latest commitment transaction.
5280         // Then, broadcast these to B, which should update the monitor downstream on the A<->B
5281         // channel.
5282         // Finally, check that B will claim the HTLC output if A's latest commitment transaction
5283         // gets broadcast.
5284
5285         let chanmon_cfgs = create_chanmon_cfgs(3);
5286         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
5287         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
5288         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
5289
5290         // Create some initial channels
5291         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
5292         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
5293
5294         // Ensure all nodes are at the same height
5295         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
5296         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
5297         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
5298         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
5299
5300         // Rebalance the network a bit by relaying one payment through all the channels ...
5301         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
5302         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
5303
5304         let (payment_preimage, _payment_hash, _payment_secret) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
5305         let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
5306         check_spends!(commitment_tx[0], chan_2.3);
5307         nodes[2].node.claim_funds(payment_preimage);
5308         check_added_monitors!(nodes[2], 1);
5309         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
5310         assert!(updates.update_add_htlcs.is_empty());
5311         assert!(updates.update_fail_htlcs.is_empty());
5312         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
5313         assert!(updates.update_fail_malformed_htlcs.is_empty());
5314
5315         mine_transaction(&nodes[2], &commitment_tx[0]);
5316         check_closed_broadcast!(nodes[2], true);
5317         check_added_monitors!(nodes[2], 1);
5318
5319         let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Success tx), ChannelMonitor : 1 (HTLC-Success tx)
5320         assert_eq!(c_txn.len(), 3);
5321         assert_eq!(c_txn[0], c_txn[2]);
5322         assert_eq!(commitment_tx[0], c_txn[1]);
5323         check_spends!(c_txn[1], chan_2.3);
5324         check_spends!(c_txn[2], c_txn[1]);
5325         assert_eq!(c_txn[1].input[0].witness.clone().last().unwrap().len(), 71);
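	// The last witness element when spending the funding output is the 2-of-2 multisig witness
	// script, which is 71 bytes.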
5326         assert_eq!(c_txn[2].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5327         assert!(c_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
5328         assert_eq!(c_txn[0].lock_time, 0); // Success tx
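	// HTLC-Success transactions have a lock_time of 0, while HTLC-Timeout transactions set
	// lock_time to the HTLC's cltv_expiry (checked via assert_ne! below).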
5329
5330         // Now that we've broadcast C's commitment tx and HTLC-Success tx on B's chain, B should be able to extract the preimage and update the downstream monitor
5331         let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
5332         connect_block(&nodes[1], &Block { header, txdata: vec![c_txn[1].clone(), c_txn[2].clone()]});
5333         connect_blocks(&nodes[1], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
5334         {
5335                 let mut b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
5336                 // ChannelMonitor: claim tx, ChannelManager: local commitment tx
5337                 assert_eq!(b_txn.len(), 2);
5338                 check_spends!(b_txn[0], chan_2.3); // B local commitment tx, issued by ChannelManager
5339                 check_spends!(b_txn[1], c_txn[1]); // timeout tx on C remote commitment tx, issued by ChannelMonitor
5340                 assert_eq!(b_txn[1].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5341                 assert!(b_txn[1].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
5342                 assert_ne!(b_txn[1].lock_time, 0); // Timeout tx
5343                 b_txn.clear();
5344         }
5345         check_added_monitors!(nodes[1], 1);
5346         let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
5347         assert_eq!(msg_events.len(), 3);
5348         check_added_monitors!(nodes[1], 1);
5349         match msg_events[0] {
5350                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
5351                 _ => panic!("Unexpected event"),
5352         }
5353         match msg_events[1] {
5354                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id: _ } => {},
5355                 _ => panic!("Unexpected event"),
5356         }
5357         match msg_events[2] {
5358                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
5359                         assert!(update_add_htlcs.is_empty());
5360                         assert!(update_fail_htlcs.is_empty());
5361                         assert_eq!(update_fulfill_htlcs.len(), 1);
5362                         assert!(update_fail_malformed_htlcs.is_empty());
5363                         assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
5364                 },
5365                 _ => panic!("Unexpected event"),
5366         };
5367         // Broadcast A's commitment tx on B's chain to see if we are able to claim the inbound HTLC with our HTLC-Success tx
5368         let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
5369         mine_transaction(&nodes[1], &commitment_tx[0]);
5370         let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
5371         // ChannelMonitor: HTLC-Success tx + HTLC-Timeout RBF Bump, ChannelManager: local commitment tx + HTLC-Success tx
5372         assert_eq!(b_txn.len(), 4);
5373         check_spends!(b_txn[2], chan_1.3);
5374         check_spends!(b_txn[3], b_txn[2]);
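	// The two ChannelMonitor claims may be broadcast in either order, so identify them by which
	// transaction they spend rather than by index.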
5375         let (htlc_success_claim, htlc_timeout_bumped) =
5376                 if b_txn[0].input[0].previous_output.txid == commitment_tx[0].txid()
5377                         { (&b_txn[0], &b_txn[1]) } else { (&b_txn[1], &b_txn[0]) };
5378         check_spends!(htlc_success_claim, commitment_tx[0]);
5379         assert_eq!(htlc_success_claim.input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
5380         assert!(htlc_success_claim.output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
5381         assert_eq!(htlc_success_claim.lock_time, 0); // Success tx
5382         check_spends!(htlc_timeout_bumped, c_txn[1]); // timeout tx on C remote commitment tx, issued by ChannelMonitor
5383         assert_ne!(htlc_timeout_bumped.lock_time, 0); // Timeout tx
5384
5385         check_closed_broadcast!(nodes[1], true);
5386         check_added_monitors!(nodes[1], 1);
5387 }
5388
5389 #[test]
5390 fn test_duplicate_payment_hash_one_failure_one_success() {
5391         // Topology : A --> B --> C --> D
5392         // We route 2 payments with the same hash between B and C; one will time out, the other will be successfully claimed
5393         // Note that because C will refuse to generate two payment secrets for the same payment hash,
5394         // we forward one of the payments onwards to D.
5395         let chanmon_cfgs = create_chanmon_cfgs(4);
5396         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
5397         // When this test was written, the default base fee floated based on the HTLC count.
5398         // It is now fixed, so we simply set the fee to the expected value here.
5399         let mut config = test_default_channel_config();
5400         config.channel_options.forwarding_fee_base_msat = 196;
5401         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs,
5402                 &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
5403         let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
5404
5405         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
5406         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
5407         create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known());
5408
5409         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
5410         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
5411         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
5412         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
5413         connect_blocks(&nodes[3], node_max_height - nodes[3].best_block_info().1);
5414
5415         let (our_payment_preimage, duplicate_payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 900000);
5416
5417         let payment_secret = nodes[3].node.create_inbound_payment_for_hash(duplicate_payment_hash, None, 7200, 0).unwrap();
5418         // We reduce the final CLTV here by a somewhat arbitrary constant to keep it under the one-byte
5419         // script push size limit so that the below script length checks match
5420         // ACCEPTED_HTLC_SCRIPT_WEIGHT.
5421         let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph.read().unwrap(),
5422                 &nodes[3].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 900000, TEST_FINAL_CLTV - 40, nodes[0].logger).unwrap();
5423         send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[2], &nodes[3]]], 900000, duplicate_payment_hash, payment_secret);
5424
5425         let commitment_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
5426         assert_eq!(commitment_txn[0].input.len(), 1);
5427         check_spends!(commitment_txn[0], chan_2.3);
5428
5429         mine_transaction(&nodes[1], &commitment_txn[0]);
5430         check_closed_broadcast!(nodes[1], true);
5431         check_added_monitors!(nodes[1], 1);
5432         connect_blocks(&nodes[1], TEST_FINAL_CLTV - 40 + MIN_CLTV_EXPIRY_DELTA as u32 - 1); // Confirm blocks until the HTLC expires
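	// B now considers the HTLCs on chan_2 timed out and goes on-chain with HTLC-Timeout claims,
	// extracted below.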
5433
5434         let htlc_timeout_tx;
5435         { // Extract one of the two HTLC-Timeout transactions
5436                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
5437                 // ChannelMonitor: timeout tx * 3, ChannelManager: local commitment tx
5438                 assert_eq!(node_txn.len(), 4);
5439                 check_spends!(node_txn[0], chan_2.3);
5440
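		// Of the three ChannelMonitor timeout claims, two spend the same HTLC output and the third
		// spends the other HTLC output sharing the duplicate payment hash, as asserted below.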
5441                 check_spends!(node_txn[1], commitment_txn[0]);
5442                 assert_eq!(node_txn[1].input.len(), 1);
5443                 check_spends!(node_txn[2], commitment_txn[0]);
5444                 assert_eq!(node_txn[2].input.len(), 1);
5445                 assert_eq!(node_txn[1].input[0].previous_output, node_txn[2].input[0].previous_output);
5446                 check_spends!(node_txn[3], commitment_txn[0]);
5447                 assert_ne!(node_txn[1].input[0].previous_output, node_txn[3].input[0].previous_output);
5448
5449                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5450                 assert_eq!(node_txn[2].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5451                 assert_eq!(node_txn[3].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5452                 htlc_timeout_tx = node_txn[1].clone();
5453         }
5454
5455         nodes[2].node.claim_funds(our_payment_preimage);
5456         mine_transaction(&nodes[2], &commitment_txn[0]);
5457         check_added_monitors!(nodes[2], 2);
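	// One monitor update comes from claim_funds (storing the preimage), the second from the
	// channel closing on-chain.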
5458         let events = nodes[2].node.get_and_clear_pending_msg_events();
5459         match events[0] {
5460                 MessageSendEvent::UpdateHTLCs { .. } => {},
5461                 _ => panic!("Unexpected event"),
5462         }
5463         match events[1] {
5464                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
5465                 _ => panic!("Unexpected event"),
5466         }
5467         let htlc_success_txn: Vec<_> = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
5468         assert_eq!(htlc_success_txn.len(), 5); // ChannelMonitor: HTLC-Success txn (*2 due to 2-HTLC outputs), ChannelManager: local commitment tx + HTLC-Success txn (*2 due to 2-HTLC outputs)
5469         check_spends!(htlc_success_txn[0], commitment_txn[0]);
5470         check_spends!(htlc_success_txn[1], commitment_txn[0]);
5471         assert_eq!(htlc_success_txn[0].input.len(), 1);
5472         assert_eq!(htlc_success_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5473         assert_eq!(htlc_success_txn[1].input.len(), 1);
5474         assert_eq!(htlc_success_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5475         assert_ne!(htlc_success_txn[0].input[0].previous_output, htlc_success_txn[1].input[0].previous_output);
5476         assert_eq!(htlc_success_txn[2], commitment_txn[0]);
5477         assert_eq!(htlc_success_txn[3], htlc_success_txn[0]);
5478         assert_eq!(htlc_success_txn[4], htlc_success_txn[1]);
5479         assert_ne!(htlc_success_txn[0].input[0].previous_output, htlc_timeout_tx.input[0].previous_output);
5480
5481         mine_transaction(&nodes[1], &htlc_timeout_tx);
5482         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
5483         expect_pending_htlcs_forwardable!(nodes[1]);
5484         let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5485         assert!(htlc_updates.update_add_htlcs.is_empty());
5486         assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
5487         let first_htlc_id = htlc_updates.update_fail_htlcs[0].htlc_id;
5488         assert!(htlc_updates.update_fulfill_htlcs.is_empty());
5489         assert!(htlc_updates.update_fail_malformed_htlcs.is_empty());
5490         check_added_monitors!(nodes[1], 1);
5491
5492         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
5493         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
5494         {
5495                 commitment_signed_dance!(nodes[0], nodes[1], &htlc_updates.commitment_signed, false, true);
5496                 expect_payment_failure_chan_update!(nodes[0], chan_2.0.contents.short_channel_id, true);
5497         }
5498         expect_payment_failed!(nodes[0], duplicate_payment_hash, false);
5499
5500         // Resolve the 2nd HTLC by broadcasting C's HTLC-Success tx on B's chain
5501         mine_transaction(&nodes[1], &htlc_success_txn[0]);
5502         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
5503         assert!(updates.update_add_htlcs.is_empty());
5504         assert!(updates.update_fail_htlcs.is_empty());
5505         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
5506         assert_ne!(updates.update_fulfill_htlcs[0].htlc_id, first_htlc_id);
5507         assert!(updates.update_fail_malformed_htlcs.is_empty());
5508         check_added_monitors!(nodes[1], 1);
5509
5510         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
5511         commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false);
5512
5513         let events = nodes[0].node.get_and_clear_pending_events();
5514         match events[0] {
5515                 Event::PaymentSent { ref payment_preimage } => {
5516                         assert_eq!(*payment_preimage, our_payment_preimage);
5517                 }
5518                 _ => panic!("Unexpected event"),
5519         }
5520 }
5521
5522 #[test]
5523 fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
5524         let chanmon_cfgs = create_chanmon_cfgs(2);
5525         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5526         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5527         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5528
5529         // Create some initial channels
5530         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
5531
5532         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000).0;
5533         let local_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
5534         assert_eq!(local_txn.len(), 1);
5535         assert_eq!(local_txn[0].input.len(), 1);
5536         check_spends!(local_txn[0], chan_1.3);
5537
5538         // Give B knowledge of the preimage so it can generate a local HTLC-Success tx
5539         nodes[1].node.claim_funds(payment_preimage);
5540         check_added_monitors!(nodes[1], 1);
5541         mine_transaction(&nodes[1], &local_txn[0]);
5542         check_added_monitors!(nodes[1], 1);
5543         let events = nodes[1].node.get_and_clear_pending_msg_events();
5544         match events[0] {
5545                 MessageSendEvent::UpdateHTLCs { .. } => {},
5546                 _ => panic!("Unexpected event"),
5547         }
5548         match events[1] {
5549                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
5550                 _ => panic!("Unexpected event"),
5551         }
5552         let node_tx = {
5553                 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
5554                 assert_eq!(node_txn.len(), 3);
5555                 assert_eq!(node_txn[0], node_txn[2]);
5556                 assert_eq!(node_txn[1], local_txn[0]);
5557                 assert_eq!(node_txn[0].input.len(), 1);
5558                 assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
5559                 check_spends!(node_txn[0], local_txn[0]);
5560                 node_txn[0].clone()
5561         };
5562
5563         mine_transaction(&nodes[1], &node_tx);
5564         connect_blocks(&nodes[1], BREAKDOWN_TIMEOUT as u32 - 1);
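	// B's HTLC-Success output is encumbered by a to_self_delay CSV delay (BREAKDOWN_TIMEOUT in
	// these tests, as the sequence assertion below confirms), so let it mature before sweeping.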
5565
5566         // Verify that B is able to spend its own HTLC-Success tx thanks to the spendable output event given back by its ChannelMonitor
5567         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
5568         assert_eq!(spend_txn.len(), 1);
5569         assert_eq!(spend_txn[0].input.len(), 1);
5570         check_spends!(spend_txn[0], node_tx);
5571         assert_eq!(spend_txn[0].input[0].sequence, BREAKDOWN_TIMEOUT as u32);
5572 }
5573
5574 fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, announce_latest: bool) {
5575         // Test that we fail backwards the full set of HTLCs we need to when remote broadcasts an
5576         // unrevoked commitment transaction.
5577         // This includes HTLCs which were below the dust threshold as well as HTLCs which were awaiting
5578         // a remote RAA before they could be failed backwards (and combinations thereof).
5579         // We also test duplicate-hash HTLCs by adding two nodes on each side of the target nodes which
5580         // use the same payment hashes.
5581         // Thus, we use a six-node network:
5582         //
5583         // A \         / E
5584         //    - C - D -
5585         // B /         \ F
5586         // And test where C fails back to A/B when D announces its latest commitment transaction
5587         let chanmon_cfgs = create_chanmon_cfgs(6);
5588         let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
5589         // When this test was written, the default base fee floated based on the HTLC count.
5590         // It is now fixed, so we simply set the fee to the expected value here.
5591         let mut config = test_default_channel_config();
5592         config.channel_options.forwarding_fee_base_msat = 196;
5593         let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs,
5594                 &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
5595         let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
5596         let logger = test_utils::TestLogger::new();
5597
5598         create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known());
5599         create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
5600         let chan = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known());
5601         create_announced_chan_between_nodes(&nodes, 3, 4, InitFeatures::known(), InitFeatures::known());
5602         create_announced_chan_between_nodes(&nodes, 3, 5, InitFeatures::known(), InitFeatures::known());
5603
5604         // Rebalance and check output sanity...
5605         send_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 500000);
5606         send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000);
5607         assert_eq!(get_local_commitment_txn!(nodes[3], chan.2)[0].output.len(), 2);
5608
5609         let ds_dust_limit = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis;
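	// HTLCs of ds_dust_limit*1000 msat are below D's dust limit once the HTLC-transaction fee is
	// accounted for, so they do not appear as outputs in D's commitment tx ("not added" below).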
5610         // 0th HTLC:
5611         let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
5612         // 1st HTLC:
5613         let (_, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
5614         let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
5615         let our_node_id = &nodes[1].node.get_our_node_id();
5616         let route = get_route(our_node_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[5].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), ds_dust_limit*1000, TEST_FINAL_CLTV, &logger).unwrap();
5617         // 2nd HTLC:
5618         send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_1, nodes[5].node.create_inbound_payment_for_hash(payment_hash_1, None, 7200, 0).unwrap()); // not added < dust limit + HTLC tx fee
5619         // 3rd HTLC:
5620         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_2, nodes[5].node.create_inbound_payment_for_hash(payment_hash_2, None, 7200, 0).unwrap()); // not added < dust limit + HTLC tx fee
5621         // 4th HTLC:
5622         let (_, payment_hash_3, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
5623         // 5th HTLC:
5624         let (_, payment_hash_4, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
5625         let route = get_route(our_node_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[5].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
5626         // 6th HTLC:
5627         send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_3, nodes[5].node.create_inbound_payment_for_hash(payment_hash_3, None, 7200, 0).unwrap());
5628         // 7th HTLC:
5629         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_4, nodes[5].node.create_inbound_payment_for_hash(payment_hash_4, None, 7200, 0).unwrap());
5630
5631         // 8th HTLC:
5632         let (_, payment_hash_5, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
5633         // 9th HTLC:
5634         let route = get_route(our_node_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[5].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), ds_dust_limit*1000, TEST_FINAL_CLTV, &logger).unwrap();
5635         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_5, nodes[5].node.create_inbound_payment_for_hash(payment_hash_5, None, 7200, 0).unwrap()); // not added < dust limit + HTLC tx fee
5636
5637         // 10th HTLC:
5638         let (_, payment_hash_6, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
5639         // 11th HTLC:
5640         let route = get_route(our_node_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[5].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
5641         send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_6, nodes[5].node.create_inbound_payment_for_hash(payment_hash_6, None, 7200, 0).unwrap());
5642
5643         // Double-check that six of the new HTLCs were added
5644         // We now have six HTLCs pending over the dust limit and six under it: the commitment tx has
5645         // 8 outputs (to_local, to_remote, and the six above-dust HTLCs), with the six below-dust HTLCs not included.
5646         assert_eq!(get_local_commitment_txn!(nodes[3], chan.2).len(), 1);
5647         assert_eq!(get_local_commitment_txn!(nodes[3], chan.2)[0].output.len(), 8);
5648
5649         // Now fail back three of the over-dust-limit and three of the under-dust-limit payments in one go.
5650         // Fail 0th below-dust, 4th above-dust, 8th above-dust, 10th below-dust HTLCs
5651         assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_1));
5652         assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_3));
5653         assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_5));
5654         assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_6));
5655         check_added_monitors!(nodes[4], 0);
5656         expect_pending_htlcs_forwardable!(nodes[4]);
5657         check_added_monitors!(nodes[4], 1);
5658
5659         let four_removes = get_htlc_update_msgs!(nodes[4], nodes[3].node.get_our_node_id());
5660         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[0]);
5661         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[1]);
5662         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[2]);
5663         nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[3]);
5664         commitment_signed_dance!(nodes[3], nodes[4], four_removes.commitment_signed, false);
5665
5666         // Fail 3rd below-dust and 7th above-dust HTLCs
5667         assert!(nodes[5].node.fail_htlc_backwards(&payment_hash_2));
5668         assert!(nodes[5].node.fail_htlc_backwards(&payment_hash_4));
5669         check_added_monitors!(nodes[5], 0);
5670         expect_pending_htlcs_forwardable!(nodes[5]);
5671         check_added_monitors!(nodes[5], 1);
5672
5673         let two_removes = get_htlc_update_msgs!(nodes[5], nodes[3].node.get_our_node_id());
5674         nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[0]);
5675         nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[1]);
5676         commitment_signed_dance!(nodes[3], nodes[5], two_removes.commitment_signed, false);
5677
5678         let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan.2);
5679
5680         expect_pending_htlcs_forwardable!(nodes[3]);
5681         check_added_monitors!(nodes[3], 1);
5682         let six_removes = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
5683         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[0]);
5684         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[1]);
5685         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[2]);
5686         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[3]);
5687         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[4]);
5688         nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[5]);
5689         if deliver_last_raa {
5690                 commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false);
5691         } else {
5692                 let _cs_last_raa = commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false, true, false, true);
5693         }
5694
5695         // D's latest commitment transaction now contains 1st + 2nd + 9th HTLCs (implicitly, they're
5696         // below the dust limit) and the 5th + 6th + 11th HTLCs. It has failed back the 0th, 3rd, 4th,
5697         // 7th, 8th, and 10th, but as we haven't yet delivered the final RAA to C, the fails haven't
5698         // propagated back to A/B yet (and D has two unrevoked commitment transactions).
5699         //
5700         // We now broadcast the latest commitment transaction, which *should* result in failures for
5701         // the 0th, 1st, 2nd, 3rd, 4th, 7th, 8th, 9th, and 10th HTLCs, ie all the below-dust HTLCs and
5702         // the non-broadcast above-dust HTLCs.
5703         //
5704         // Alternatively, we may broadcast the previous commitment transaction, which should only
5705         // result in failures for the below-dust HTLCs, ie the 0th, 1st, 2nd, 3rd, 9th, and 10th HTLCs.
5706         let ds_last_commitment_tx = get_local_commitment_txn!(nodes[3], chan.2);
5707
5708         if announce_latest {
5709                 mine_transaction(&nodes[2], &ds_last_commitment_tx[0]);
5710         } else {
5711                 mine_transaction(&nodes[2], &ds_prev_commitment_tx[0]);
5712         }
5713         connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
5714         check_closed_broadcast!(nodes[2], true);
5715         expect_pending_htlcs_forwardable!(nodes[2]);
5716         check_added_monitors!(nodes[2], 3);
5717
5718         let cs_msgs = nodes[2].node.get_and_clear_pending_msg_events();
5719         assert_eq!(cs_msgs.len(), 2);
5720         let mut a_done = false;
5721         for msg in cs_msgs {
5722                 match msg {
5723                         MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
5724                                 // Both under-dust HTLCs and the one above-dust HTLC that we had already failed
5725                                 // should be failed-backwards here.
5726                                 let target = if *node_id == nodes[0].node.get_our_node_id() {
5727                                         // If announce_latest, expect 0th, 1st, 4th, 8th, 10th HTLCs, else only 0th, 1st, 10th below-dust HTLCs
5728                                         for htlc in &updates.update_fail_htlcs {
5729                                                 assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 6 || if announce_latest { htlc.htlc_id == 3 || htlc.htlc_id == 5 } else { false });
5730                                         }
5731                                         assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 5 } else { 3 });
5732                                         assert!(!a_done);
5733                                         a_done = true;
5734                                         &nodes[0]
5735                                 } else {
5736                                         // If announce_latest, expect 2nd, 3rd, 7th, 9th HTLCs, else only 2nd, 3rd, 9th below-dust HTLCs
5737                                         for htlc in &updates.update_fail_htlcs {
5738                                                 assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 5 || if announce_latest { htlc.htlc_id == 4 } else { false });
5739                                         }
5740                                         assert_eq!(*node_id, nodes[1].node.get_our_node_id());
5741                                         assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 4 } else { 3 });
5742                                         &nodes[1]
5743                                 };
5744                                 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
5745                                 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[1]);
5746                                 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[2]);
5747                                 if announce_latest {
5748                                         target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[3]);
5749                                         if *node_id == nodes[0].node.get_our_node_id() {
5750                                                 target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[4]);
5751                                         }
5752                                 }
5753                                 commitment_signed_dance!(target, nodes[2], updates.commitment_signed, false, true);
5754                         },
5755                         _ => panic!("Unexpected event"),
5756                 }
5757         }
5758
5759         let as_events = nodes[0].node.get_and_clear_pending_events();
5760         assert_eq!(as_events.len(), if announce_latest { 5 } else { 3 });
5761         let mut as_failds = HashSet::new();
5762         for event in as_events.iter() {
5763                 if let &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, .. } = event {
5764                         assert!(as_failds.insert(*payment_hash));
5765                         if *payment_hash != payment_hash_2 {
5766                                 assert_eq!(*rejected_by_dest, deliver_last_raa);
5767                         } else {
5768                                 assert!(!rejected_by_dest);
5769                         }
5770                 } else { panic!("Unexpected event"); }
5771         }
5772         assert!(as_failds.contains(&payment_hash_1));
5773         assert!(as_failds.contains(&payment_hash_2));
5774         if announce_latest {
5775                 assert!(as_failds.contains(&payment_hash_3));
5776                 assert!(as_failds.contains(&payment_hash_5));
5777         }
5778         assert!(as_failds.contains(&payment_hash_6));
5779
5780         let bs_events = nodes[1].node.get_and_clear_pending_events();
5781         assert_eq!(bs_events.len(), if announce_latest { 4 } else { 3 });
5782         let mut bs_failds = HashSet::new();
5783         for event in bs_events.iter() {
5784                 if let &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, .. } = event {
5785                         assert!(bs_failds.insert(*payment_hash));
5786                         if *payment_hash != payment_hash_1 && *payment_hash != payment_hash_5 {
5787                                 assert_eq!(*rejected_by_dest, deliver_last_raa);
5788                         } else {
5789                                 assert!(!rejected_by_dest);
5790                         }
5791                 } else { panic!("Unexpected event"); }
5792         }
5793         assert!(bs_failds.contains(&payment_hash_1));
5794         assert!(bs_failds.contains(&payment_hash_2));
5795         if announce_latest {
5796                 assert!(bs_failds.contains(&payment_hash_4));
5797         }
5798         assert!(bs_failds.contains(&payment_hash_5));
5799
5800         // For each HTLC which was not failed-back by normal process (ie deliver_last_raa), we should
5801         // get a PaymentFailureNetworkUpdate. A should have gotten 4 HTLCs which were failed-back due
5802         // to unknown-preimage-etc, B should have gotten 2. Thus, in the
5803         // announce_latest && deliver_last_raa case, we should have 5-4=1 and 4-2=2
5804         // PaymentFailureNetworkUpdates.
5805         let as_msg_events = nodes[0].node.get_and_clear_pending_msg_events();
5806         assert_eq!(as_msg_events.len(), if deliver_last_raa { 1 } else if !announce_latest { 3 } else { 5 });
5807         let bs_msg_events = nodes[1].node.get_and_clear_pending_msg_events();
5808         assert_eq!(bs_msg_events.len(), if deliver_last_raa { 2 } else if !announce_latest { 3 } else { 4 });
5809         for event in as_msg_events.iter().chain(bs_msg_events.iter()) {
5810                 match event {
5811                         &MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
5812                         _ => panic!("Unexpected event"),
5813                 }
5814         }
5815 }
5816
5817 #[test]
5818 fn test_fail_backwards_latest_remote_announce_a() {
5819         do_test_fail_backwards_unrevoked_remote_announce(false, true);
5820 }
5821
5822 #[test]
5823 fn test_fail_backwards_latest_remote_announce_b() {
5824         do_test_fail_backwards_unrevoked_remote_announce(true, true);
5825 }
5826
5827 #[test]
5828 fn test_fail_backwards_previous_remote_announce() {
5829         do_test_fail_backwards_unrevoked_remote_announce(false, false);
5830         // Note that true, true doesn't make sense as it implies we announce a revoked state, which is
5831         // tested for in test_commitment_revoked_fail_backward_exhaustive()
5832 }
5833
5834 #[test]
5835 fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
5836         let chanmon_cfgs = create_chanmon_cfgs(2);
5837         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5838         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5839         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5840
5841         // Create some initial channels
5842         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
5843
5844         let (_, our_payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000);
5845         let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
5846         assert_eq!(local_txn[0].input.len(), 1);
5847         check_spends!(local_txn[0], chan_1.3);
5848
5849         // Time out the HTLC on A's chain so it can generate an HTLC-Timeout tx
5850         mine_transaction(&nodes[0], &local_txn[0]);
5851         check_closed_broadcast!(nodes[0], true);
5852         check_added_monitors!(nodes[0], 1);
5853         connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
5854
5855         let htlc_timeout = {
5856                 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
5857                 assert_eq!(node_txn.len(), 2);
5858                 check_spends!(node_txn[0], chan_1.3);
5859                 assert_eq!(node_txn[1].input.len(), 1);
5860                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
5861                 check_spends!(node_txn[1], local_txn[0]);
5862                 node_txn[1].clone()
5863         };
5864
5865         mine_transaction(&nodes[0], &htlc_timeout);
5866         connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
5867         expect_payment_failed!(nodes[0], our_payment_hash, true);
5868
5869         // Verify that A is able to spend its own HTLC-Timeout tx thanks to the spendable output event given back by its ChannelMonitor
5870         let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
5871         assert_eq!(spend_txn.len(), 3);
5872         check_spends!(spend_txn[0], local_txn[0]);
5873         assert_eq!(spend_txn[1].input.len(), 1);
5874         check_spends!(spend_txn[1], htlc_timeout);
5875         assert_eq!(spend_txn[1].input[0].sequence, BREAKDOWN_TIMEOUT as u32);
5876         assert_eq!(spend_txn[2].input.len(), 2);
5877         check_spends!(spend_txn[2], local_txn[0], htlc_timeout);
5878         assert!(spend_txn[2].input[0].sequence == BREAKDOWN_TIMEOUT as u32 ||
5879                 spend_txn[2].input[1].sequence == BREAKDOWN_TIMEOUT as u32);
5880 }
5881
5882 #[test]
5883 fn test_key_derivation_params() {
5884         // This test is a copy of test_dynamic_spendable_outputs_local_htlc_timeout_tx, with
5885         // a key manager rotation to test that key_derivation_params returned in DynamicOutputP2WSH
5886         // let us re-derive the channel key set to then derive a delayed_payment_key.
5887
5888         let chanmon_cfgs = create_chanmon_cfgs(3);
5889
5890         // We manually create the node configuration to back up the seed.
5891         let seed = [42; 32];
5892         let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
5893         let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &chanmon_cfgs[0].persister, &keys_manager);
5894         let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, chain_monitor, keys_manager: &keys_manager, node_seed: seed };
5895         let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
5896         node_cfgs.remove(0);
5897         node_cfgs.insert(0, node);
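	// Swap in the manually-built config for node 0 so the test knows its seed and can construct a
	// fresh KeysInterface from it later on.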
5898
5899         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
5900         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
5901
5902         // Create some initial channels
5903         // Create a dummy channel to advance index by one and thus test re-derivation correctness
5904         // for node 0
5905         let chan_0 = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known());
5906         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
5907         assert_ne!(chan_0.3.output[0].script_pubkey, chan_1.3.output[0].script_pubkey);
5908
5909         // Ensure all nodes are at the same height
5910         let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
5911         connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
5912         connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
5913         connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
5914
5915         let (_, our_payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000);
5916         let local_txn_0 = get_local_commitment_txn!(nodes[0], chan_0.2);
5917         let local_txn_1 = get_local_commitment_txn!(nodes[0], chan_1.2);
5918         assert_eq!(local_txn_1[0].input.len(), 1);
5919         check_spends!(local_txn_1[0], chan_1.3);
5920
5921         // We check that the funding pubkeys are unique
5922         let (from_0_funding_key_0, from_0_funding_key_1) = (PublicKey::from_slice(&local_txn_0[0].input[0].witness[3][2..35]), PublicKey::from_slice(&local_txn_0[0].input[0].witness[3][36..69]));
5923         let (from_1_funding_key_0, from_1_funding_key_1) = (PublicKey::from_slice(&local_txn_1[0].input[0].witness[3][2..35]), PublicKey::from_slice(&local_txn_1[0].input[0].witness[3][36..69]));
5924         if from_0_funding_key_0 == from_1_funding_key_0
5925             || from_0_funding_key_0 == from_1_funding_key_1
5926             || from_0_funding_key_1 == from_1_funding_key_0
5927             || from_0_funding_key_1 == from_1_funding_key_1 {
5928                 panic!("Funding pubkeys aren't unique");
5929         }
5930
5931         // Time out the HTLC on A's chain so it can generate an HTLC-Timeout tx
5932         mine_transaction(&nodes[0], &local_txn_1[0]);
5933         connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
5934         check_closed_broadcast!(nodes[0], true);
5935         check_added_monitors!(nodes[0], 1);
5936
5937         let htlc_timeout = {
5938                 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
5939                 assert_eq!(node_txn[1].input.len(), 1);
5940                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
5941                 check_spends!(node_txn[1], local_txn_1[0]);
5942                 node_txn[1].clone()
5943         };
5944
5945         mine_transaction(&nodes[0], &htlc_timeout);
5946         connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
5947         expect_payment_failed!(nodes[0], our_payment_hash, true);
5948
5949         // Verify that A is able to spend its own HTLC-Timeout tx thanks to the spendable output event given back by its ChannelMonitor
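	// A brand new KeysInterface built from the backed-up seed must be able to re-derive the
	// per-channel keys needed to sweep these outputs.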
5950         let new_keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
5951         let spend_txn = check_spendable_outputs!(nodes[0], new_keys_manager);
5952         assert_eq!(spend_txn.len(), 3);
5953         check_spends!(spend_txn[0], local_txn_1[0]);
5954         assert_eq!(spend_txn[1].input.len(), 1);
5955         check_spends!(spend_txn[1], htlc_timeout);
5956         assert_eq!(spend_txn[1].input[0].sequence, BREAKDOWN_TIMEOUT as u32);
5957         assert_eq!(spend_txn[2].input.len(), 2);
5958         check_spends!(spend_txn[2], local_txn_1[0], htlc_timeout);
5959         assert!(spend_txn[2].input[0].sequence == BREAKDOWN_TIMEOUT as u32 ||
5960                 spend_txn[2].input[1].sequence == BREAKDOWN_TIMEOUT as u32);
5961 }
5962
5963 #[test]
5964 fn test_static_output_closing_tx() {
5965         let chanmon_cfgs = create_chanmon_cfgs(2);
5966         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5967         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5968         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5969
5970         let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
5971
5972         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
5973         let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
5974
5975         mine_transaction(&nodes[0], &closing_tx);
5976         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
5977
5978         let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
5979         assert_eq!(spend_txn.len(), 1);
5980         check_spends!(spend_txn[0], closing_tx);
5981
5982         mine_transaction(&nodes[1], &closing_tx);
5983         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
5984
5985         let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
5986         assert_eq!(spend_txn.len(), 1);
5987         check_spends!(spend_txn[0], closing_tx);
5988 }
5989
5990 fn do_htlc_claim_local_commitment_only(use_dust: bool) {
5991         let chanmon_cfgs = create_chanmon_cfgs(2);
5992         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
5993         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
5994         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
5995         let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
5996
5997         let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3000000 });
5998
5999         // Claim the payment, but don't deliver A's commitment_signed, resulting in the HTLC only being
6000         // present in B's local commitment transaction, but none of A's commitment transactions.
6001         assert!(nodes[1].node.claim_funds(our_payment_preimage));
6002         check_added_monitors!(nodes[1], 1);
6003
6004         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6005         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
6006         let events = nodes[0].node.get_and_clear_pending_events();
6007         assert_eq!(events.len(), 1);
6008         match events[0] {
6009                 Event::PaymentSent { payment_preimage } => {
6010                         assert_eq!(payment_preimage, our_payment_preimage);
6011                 },
6012                 _ => panic!("Unexpected event"),
6013         }
6014
6015         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed);
6016         check_added_monitors!(nodes[0], 1);
6017         let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6018         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0);
6019         check_added_monitors!(nodes[1], 1);
6020
6021         let starting_block = nodes[1].best_block_info();
6022         let mut block = Block {
6023                 header: BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
6024                 txdata: vec![],
6025         };
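	// Connect blocks until the HTLC is within CLTV_CLAIM_BUFFER blocks of expiry, at which point B
	// must go on-chain to claim it (broadcasting its commitment tx, plus an HTLC-Success if the
	// HTLC is non-dust).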
6026         for _ in starting_block.1 + 1..TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + starting_block.1 + 2 {
6027                 connect_block(&nodes[1], &block);
6028                 block.header.prev_blockhash = block.block_hash();
6029         }
6030         test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
6031         check_closed_broadcast!(nodes[1], true);
6032         check_added_monitors!(nodes[1], 1);
6033 }
6034
6035 fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
6036         let chanmon_cfgs = create_chanmon_cfgs(2);
6037         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6038         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6039         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6040         let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
6041         let logger = test_utils::TestLogger::new();
6042
6043         let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[1]);
6044         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6045         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), if use_dust { 50000 } else { 3000000 }, TEST_FINAL_CLTV, &logger).unwrap();
6046         nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
6047         check_added_monitors!(nodes[0], 1);
6048
6049         let _as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6050
6051         // As far as A is concerned, the HTLC is now present only in the latest remote commitment
6052         // transaction, however it is not in A's latest local commitment, so we can just broadcast that
6053         // to "time out" the HTLC.
6054
6055         let starting_block = nodes[1].best_block_info();
6056         let mut header = BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
6057
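	// Connect blocks until the HTLC's expiry plus LATENCY_GRACE_PERIOD_BLOCKS has passed, at which
	// point A gives up on the HTLC and force-closes (with no HTLC transaction of its own to
	// broadcast, hence HTLCType::NONE below).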
6058         for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + starting_block.1 + 2 {
6059                 connect_block(&nodes[0], &Block { header, txdata: Vec::new()});
6060                 header.prev_blockhash = header.block_hash();
6061         }
6062         test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
6063         check_closed_broadcast!(nodes[0], true);
6064         check_added_monitors!(nodes[0], 1);
6065 }
6066
6067 fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
6068         let chanmon_cfgs = create_chanmon_cfgs(3);
6069         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
6070         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
6071         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
6072         let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
6073
6074         // Fail the payment, but don't deliver A's final RAA, resulting in the HTLC only being present
6075         // in B's previous (unrevoked) commitment transaction, but none of A's commitment transactions.
6076         // Also optionally test that we *don't* fail the channel in case the commitment transaction was
6077         // actually revoked.
6078         let htlc_value = if use_dust { 50000 } else { 3000000 };
6079         let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
6080         assert!(nodes[1].node.fail_htlc_backwards(&our_payment_hash));
6081         expect_pending_htlcs_forwardable!(nodes[1]);
6082         check_added_monitors!(nodes[1], 1);
6083
6084         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6085         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]);
6086         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed);
6087         check_added_monitors!(nodes[0], 1);
6088         let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6089         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0);
6090         check_added_monitors!(nodes[1], 1);
6091         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.1);
6092         check_added_monitors!(nodes[1], 1);
6093         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
6094
6095         if check_revoke_no_close {
6096                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
6097                 check_added_monitors!(nodes[0], 1);
6098         }
6099
6100         let starting_block = nodes[1].best_block_info();
6101         let mut block = Block {
6102                 header: BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
6103                 txdata: vec![],
6104         };
6105         for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 2 {
6106                 connect_block(&nodes[0], &block);
6107                 block.header.prev_blockhash = block.block_hash();
6108         }
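             // If A never processed B's revoke_and_ack, the HTLC could still appear on-chain via the
             // unrevoked previous commitment, so A must force-close. If A did process it, that
             // commitment is revoked and A only needs to fail the pending payment.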
6109         if !check_revoke_no_close {
6110                 test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
6111                 check_closed_broadcast!(nodes[0], true);
6112                 check_added_monitors!(nodes[0], 1);
6113         } else {
6114                 expect_payment_failed!(nodes[0], our_payment_hash, true);
6115         }
6116 }
6117
6118 // Test that we close channels on-chain when broadcastable HTLCs reach their timeout window.
6119 // There are only a few cases to test here:
6120 //  * it's not really normative behavior, but we test that below-dust HTLCs "included" in
6121 //    broadcastable commitment transactions result in channel closure,
6122 //  * it's included in an unrevoked-but-previous remote commitment transaction,
6123 //  * it's included in the latest remote or local commitment transactions.
6124 // We test each of the three possible commitment transactions individually and use both dust and
6125 // non-dust HTLCs.
6126 // Note that we don't bother testing both outbound and inbound HTLC failures for each case, and we
6127 // assume they are handled the same across all six cases, as both outbound and inbound failures are
6128 // tested for at least one of the cases in other tests.
6129 #[test]
6130 fn htlc_claim_single_commitment_only_a() {
6131         do_htlc_claim_local_commitment_only(true);
6132         do_htlc_claim_local_commitment_only(false);
6133
6134         do_htlc_claim_current_remote_commitment_only(true);
6135         do_htlc_claim_current_remote_commitment_only(false);
6136 }
6137
6138 #[test]
6139 fn htlc_claim_single_commitment_only_b() {
6140         do_htlc_claim_previous_remote_commitment_only(true, false);
6141         do_htlc_claim_previous_remote_commitment_only(false, false);
6142         do_htlc_claim_previous_remote_commitment_only(true, true);
6143         do_htlc_claim_previous_remote_commitment_only(false, true);
6144 }
6145
6146 #[test]
6147 #[should_panic]
6148 fn bolt2_open_channel_sending_node_checks_part1() { //This test needs to be on its own as we are catching a panic
6149         let chanmon_cfgs = create_chanmon_cfgs(2);
6150         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6151         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6152         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6153         //Force duplicate channel ids
6154         for node in nodes.iter() {
6155                 *node.keys_manager.override_channel_id_priv.lock().unwrap() = Some([0; 32]);
6156         }
6157
6158         // BOLT #2 spec: Sending node must ensure temporary_channel_id is unique from any other channel ID with the same peer.
6159         let channel_value_satoshis=10000;
6160         let push_msat=10001;
6161         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).unwrap();
6162         let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
6163         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &node0_to_1_send_open_channel);
6164
6165         //Create a second channel with a channel_id collision
6166         assert!(nodes[0].node.create_channel(nodes[0].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).is_err());
6167 }
6168
6169 #[test]
6170 fn bolt2_open_channel_sending_node_checks_part2() {
6171         let chanmon_cfgs = create_chanmon_cfgs(2);
6172         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6173         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6174         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6175
6176         // BOLT #2 spec: Sending node must set funding_satoshis to less than 2^24 satoshis
6177         let channel_value_satoshis=1 << 24; // 2^24 sats; note that `^` is XOR in Rust, so use a shift
6178         let push_msat=10001;
6179         assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).is_err());
6180
6181         // BOLT #2 spec: Sending node must set push_msat to equal or less than 1000 * funding_satoshis
6182         let channel_value_satoshis=10000;
6183         // Test when push_msat is one msat more than 1000 * funding_satoshis.
6184         let push_msat=1000*channel_value_satoshis+1;
6185         assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).is_err());
6186
6187         // BOLT #2 spec: Sending node must set channel_reserve_satoshis greater than or equal to dust_limit_satoshis
6188         let channel_value_satoshis=10000;
6189         let push_msat=10001;
6190         assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).is_ok()); //Create a valid channel
6191         let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
6192         assert!(node0_to_1_send_open_channel.channel_reserve_satoshis>=node0_to_1_send_open_channel.dust_limit_satoshis);
6193
6194         // BOLT #2 spec: Sending node must set undefined bits in channel_flags to 0
6195         // Only the least-significant bit of channel_flags is currently defined, so channel_flags can only take one of two values: 0 or 1.
6196         assert!(node0_to_1_send_open_channel.channel_flags<=1);
6197
6198         // BOLT #2 spec: Sending node should set to_self_delay sufficient to ensure the sender can irreversibly spend a commitment transaction output, in case of misbehaviour by the receiver.
6199         assert!(BREAKDOWN_TIMEOUT>0);
6200         assert!(node0_to_1_send_open_channel.to_self_delay==BREAKDOWN_TIMEOUT);
6201
6202         // BOLT #2 spec: Sending node must ensure the chain_hash value identifies the chain it wishes to open the channel within.
6203         let chain_hash=genesis_block(Network::Testnet).header.block_hash();
6204         assert_eq!(node0_to_1_send_open_channel.chain_hash,chain_hash);
6205
6206         // BOLT #2 spec: Sending node must set funding_pubkey, revocation_basepoint, htlc_basepoint, payment_basepoint, and delayed_payment_basepoint to valid DER-encoded, compressed, secp256k1 pubkeys.
6207         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.funding_pubkey.serialize()).is_ok());
6208         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.revocation_basepoint.serialize()).is_ok());
6209         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.htlc_basepoint.serialize()).is_ok());
6210         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.payment_point.serialize()).is_ok());
6211         assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.delayed_payment_basepoint.serialize()).is_ok());
6212 }
6213
6214 #[test]
6215 fn bolt2_open_channel_sane_dust_limit() {
6216         let chanmon_cfgs = create_chanmon_cfgs(2);
6217         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6218         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6219         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6220
6221         let channel_value_satoshis=1000000;
6222         let push_msat=10001;
6223         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).unwrap();
6224         let mut node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
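             // Tamper with the open_channel message so dust_limit_satoshis is one above the 660 sat
             // implementation cap asserted in the error message below, forcing nodes[1] to reject it.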
6225         node0_to_1_send_open_channel.dust_limit_satoshis = 661;
6226         node0_to_1_send_open_channel.channel_reserve_satoshis = 100001;
6227
6228         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &node0_to_1_send_open_channel);
6229         let events = nodes[1].node.get_and_clear_pending_msg_events();
6230         let err_msg = match events[0] {
6231                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
6232                         msg.clone()
6233                 },
6234                 _ => panic!("Unexpected event"),
6235         };
6236         assert_eq!(err_msg.data, "dust_limit_satoshis (661) is greater than the implementation limit (660)");
6237 }
6238
6239 // Test that if we fail to send an HTLC that is being freed from the holding cell, and the HTLC
6240 // originated from our node, its failure is surfaced to the user. We trigger this failure to
6241 // free the HTLC by increasing our fee while the HTLC is in the holding cell such that the HTLC
6242 // is no longer affordable once it's freed.
6243 #[test]
6244 fn test_fail_holding_cell_htlc_upon_free() {
6245         let chanmon_cfgs = create_chanmon_cfgs(2);
6246         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6247         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6248         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6249         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
6250         let logger = test_utils::TestLogger::new();
6251
6252         // First nodes[0] generates an update_fee, setting the channel's
6253         // pending_update_fee.
6254         nodes[0].node.update_fee(chan.2, get_feerate!(nodes[0], chan.2) + 20).unwrap();
6255         check_added_monitors!(nodes[0], 1);
6256
6257         let events = nodes[0].node.get_and_clear_pending_msg_events();
6258         assert_eq!(events.len(), 1);
6259         let (update_msg, commitment_signed) = match events[0] {
6260                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
6261                         (update_fee.as_ref(), commitment_signed)
6262                 },
6263                 _ => panic!("Unexpected event"),
6264         };
6265
6266         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
6267
6268         let mut chan_stat = get_channel_value_stat!(nodes[0], chan.2);
6269         let channel_reserve = chan_stat.channel_reserve_msat;
6270         let feerate = get_feerate!(nodes[0], chan.2);
6271
6272         // The 2* and +1 in the commit tx fee calculation below are for the fee spike reserve.
6273         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
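             // nodes[0] opened the channel with 100_000 sat and pushed 95_000_000 msat, leaving it
             // 5_000_000 msat, from which we subtract the reserve and the commitment fee terms above.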
6274         let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1);
6275         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6276         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], max_can_send, TEST_FINAL_CLTV, &logger).unwrap();
6277
6278         // Send a payment which passes reserve checks but gets stuck in the holding cell.
6279         nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
6280         chan_stat = get_channel_value_stat!(nodes[0], chan.2);
6281         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send);
6282
6283         // Flush the pending fee update.
6284         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
6285         let (as_revoke_and_ack, _) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6286         check_added_monitors!(nodes[1], 1);
6287         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack);
6288         check_added_monitors!(nodes[0], 1);
6289
6290         // Upon receipt of the RAA, there will be an attempt to resend the holding cell
6291         // HTLC, but now that the fee has been raised the payment will fail, causing
6292         // us to surface its failure to the user.
6293         chan_stat = get_channel_value_stat!(nodes[0], chan.2);
6294         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
6295         nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 1 HTLC updates in channel {}", hex::encode(chan.2)), 1);
6296         let failure_log = format!("Failed to send HTLC with payment_hash {} due to Cannot send value that would put our balance under counterparty-announced channel reserve value ({}) in channel {}",
6297                 hex::encode(our_payment_hash.0), chan_stat.channel_reserve_msat, hex::encode(chan.2));
6298         nodes[0].logger.assert_log("lightning::ln::channel".to_string(), failure_log.to_string(), 1);
6299
6300         // Check that the payment failed to be sent out.
6301         let events = nodes[0].node.get_and_clear_pending_events();
6302         assert_eq!(events.len(), 1);
6303         match &events[0] {
6304                 &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref error_code, ref error_data } => {
6305                         assert_eq!(our_payment_hash.clone(), *payment_hash);
6306                         assert_eq!(*rejected_by_dest, false);
6307                         assert_eq!(*error_code, None);
6308                         assert_eq!(*error_data, None);
6309                 },
6310                 _ => panic!("Unexpected event"),
6311         }
6312 }
6313
6314 // Test that if multiple HTLCs are released from the holding cell and one is
6315 // valid but the other is no longer valid upon release, the valid HTLC can be
6316 // successfully completed while the other one fails as expected.
6317 #[test]
6318 fn test_free_and_fail_holding_cell_htlcs() {
6319         let chanmon_cfgs = create_chanmon_cfgs(2);
6320         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6321         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6322         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6323         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
6324         let logger = test_utils::TestLogger::new();
6325
6326         // First nodes[0] generates an update_fee, setting the channel's
6327         // pending_update_fee.
6328         nodes[0].node.update_fee(chan.2, get_feerate!(nodes[0], chan.2) + 200).unwrap();
6329         check_added_monitors!(nodes[0], 1);
6330
6331         let events = nodes[0].node.get_and_clear_pending_msg_events();
6332         assert_eq!(events.len(), 1);
6333         let (update_msg, commitment_signed) = match events[0] {
6334                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
6335                         (update_fee.as_ref(), commitment_signed)
6336                 },
6337                 _ => panic!("Unexpected event"),
6338         };
6339
6340         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
6341
6342         let mut chan_stat = get_channel_value_stat!(nodes[0], chan.2);
6343         let channel_reserve = chan_stat.channel_reserve_msat;
6344         let feerate = get_feerate!(nodes[0], chan.2);
6345
6346         // The 2* and +1 in the commit tx fee calculation below are for the fee spike reserve.
6347         let (payment_preimage_1, payment_hash_1, payment_secret_1) = get_payment_preimage_hash!(nodes[1]);
6348         let amt_1 = 20000;
6349         let (_, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]);
6350         let amt_2 = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 2 + 1) - amt_1;
6351         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6352         let route_1 = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], amt_1, TEST_FINAL_CLTV, &logger).unwrap();
6353         let route_2 = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], amt_2, TEST_FINAL_CLTV, &logger).unwrap();
6354
6355         // Send 2 payments which pass reserve checks but get stuck in the holding cell.
6356         nodes[0].node.send_payment(&route_1, payment_hash_1, &Some(payment_secret_1)).unwrap();
6357         chan_stat = get_channel_value_stat!(nodes[0], chan.2);
6358         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1);
6359         nodes[0].node.send_payment(&route_2, payment_hash_2, &Some(payment_secret_2)).unwrap();
6360         chan_stat = get_channel_value_stat!(nodes[0], chan.2);
6361         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1 + amt_2);
6362
6363         // Flush the pending fee update.
6364         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
6365         let (revoke_and_ack, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6366         check_added_monitors!(nodes[1], 1);
6367         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_and_ack);
6368         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
6369         check_added_monitors!(nodes[0], 2);
6370
6371         // Upon receipt of the RAA, there will be an attempt to resend the holding cell HTLCs,
6372         // but now that the fee has been raised the second payment will fail, causing us
6373         // to surface its failure to the user. The first payment should succeed.
6374         chan_stat = get_channel_value_stat!(nodes[0], chan.2);
6375         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
6376         nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 2 HTLC updates in channel {}", hex::encode(chan.2)), 1);
6377         let failure_log = format!("Failed to send HTLC with payment_hash {} due to Cannot send value that would put our balance under counterparty-announced channel reserve value ({}) in channel {}",
6378                 hex::encode(payment_hash_2.0), chan_stat.channel_reserve_msat, hex::encode(chan.2));
6379         nodes[0].logger.assert_log("lightning::ln::channel".to_string(), failure_log.to_string(), 1);
6380
6381         // Check that the second payment failed to be sent out.
6382         let events = nodes[0].node.get_and_clear_pending_events();
6383         assert_eq!(events.len(), 1);
6384         match &events[0] {
6385                 &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref error_code, ref error_data } => {
6386                         assert_eq!(payment_hash_2.clone(), *payment_hash);
6387                         assert_eq!(*rejected_by_dest, false);
6388                         assert_eq!(*error_code, None);
6389                         assert_eq!(*error_data, None);
6390                 },
6391                 _ => panic!("Unexpected event"),
6392         }
6393
6394         // Complete the first payment and the RAA from the fee update.
6395         let (payment_event, send_raa_event) = {
6396                 let mut msgs = nodes[0].node.get_and_clear_pending_msg_events();
6397                 assert_eq!(msgs.len(), 2);
6398                 (SendEvent::from_event(msgs.remove(0)), msgs.remove(0))
6399         };
6400         let raa = match send_raa_event {
6401                 MessageSendEvent::SendRevokeAndACK { msg, .. } => msg,
6402                 _ => panic!("Unexpected event"),
6403         };
6404         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
6405         check_added_monitors!(nodes[1], 1);
6406         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6407         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6408         let events = nodes[1].node.get_and_clear_pending_events();
6409         assert_eq!(events.len(), 1);
6410         match events[0] {
6411                 Event::PendingHTLCsForwardable { .. } => {},
6412                 _ => panic!("Unexpected event"),
6413         }
6414         nodes[1].node.process_pending_htlc_forwards();
6415         let events = nodes[1].node.get_and_clear_pending_events();
6416         assert_eq!(events.len(), 1);
6417         match events[0] {
6418                 Event::PaymentReceived { .. } => {},
6419                 _ => panic!("Unexpected event"),
6420         }
6421         nodes[1].node.claim_funds(payment_preimage_1);
6422         check_added_monitors!(nodes[1], 1);
6423         let update_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6424         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msgs.update_fulfill_htlcs[0]);
6425         commitment_signed_dance!(nodes[0], nodes[1], update_msgs.commitment_signed, false, true);
6426         let events = nodes[0].node.get_and_clear_pending_events();
6427         assert_eq!(events.len(), 1);
6428         match events[0] {
6429                 Event::PaymentSent { ref payment_preimage } => {
6430                         assert_eq!(*payment_preimage, payment_preimage_1);
6431                 }
6432                 _ => panic!("Unexpected event"),
6433         }
6434 }
6435
6436 // Test that if we fail to forward an HTLC that is being freed from the holding cell that the
6437 // HTLC is failed backwards. We trigger this failure to forward the freed HTLC by increasing
6438 // our fee while the HTLC is in the holding cell such that the HTLC is no longer affordable
6439 // once it's freed.
6440 #[test]
6441 fn test_fail_holding_cell_htlc_upon_free_multihop() {
6442         let chanmon_cfgs = create_chanmon_cfgs(3);
6443         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
6444         // When this test was written, the default base fee floated based on the HTLC count.
6445         // It is now fixed, so we simply set the fee to the expected value here.
6446         let mut config = test_default_channel_config();
6447         config.channel_options.forwarding_fee_base_msat = 196;
6448         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]);
6449         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
6450         let chan_0_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
6451         let chan_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
6452         let logger = test_utils::TestLogger::new();
6453
6454         // First nodes[1] generates an update_fee, setting the channel's
6455         // pending_update_fee.
6456         nodes[1].node.update_fee(chan_1_2.2, get_feerate!(nodes[1], chan_1_2.2) + 20).unwrap();
6457         check_added_monitors!(nodes[1], 1);
6458
6459         let events = nodes[1].node.get_and_clear_pending_msg_events();
6460         assert_eq!(events.len(), 1);
6461         let (update_msg, commitment_signed) = match events[0] {
6462                 MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
6463                         (update_fee.as_ref(), commitment_signed)
6464                 },
6465                 _ => panic!("Unexpected event"),
6466         };
6467
6468         nodes[2].node.handle_update_fee(&nodes[1].node.get_our_node_id(), update_msg.unwrap());
6469
6470         let mut chan_stat = get_channel_value_stat!(nodes[0], chan_0_1.2);
6471         let channel_reserve = chan_stat.channel_reserve_msat;
6472         let feerate = get_feerate!(nodes[0], chan_0_1.2);
6473
6474         // Send a payment which passes reserve checks but gets stuck in the holding cell.
6475         let feemsat = 239;
6476         let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat;
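             // Only the single intermediate hop (nodes[1]) charges a forwarding fee on the 0 -> 1 -> 2 path.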
6477         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]);
6478         let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1) - total_routing_fee_msat;
6479         let payment_event = {
6480                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6481                 let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], max_can_send, TEST_FINAL_CLTV, &logger).unwrap();
6482                 nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
6483                 check_added_monitors!(nodes[0], 1);
6484
6485                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6486                 assert_eq!(events.len(), 1);
6487
6488                 SendEvent::from_event(events.remove(0))
6489         };
6490         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6491         check_added_monitors!(nodes[1], 0);
6492         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6493         expect_pending_htlcs_forwardable!(nodes[1]);
6494
6495         chan_stat = get_channel_value_stat!(nodes[1], chan_1_2.2);
6496         assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send);
6497
6498         // Flush the pending fee update.
6499         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
6500         let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
6501         check_added_monitors!(nodes[2], 1);
6502         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &raa);
6503         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &commitment_signed);
6504         check_added_monitors!(nodes[1], 2);
6505
6506         // A final RAA message is generated to finalize the fee update.
6507         let events = nodes[1].node.get_and_clear_pending_msg_events();
6508         assert_eq!(events.len(), 1);
6509
6510         let raa_msg = match &events[0] {
6511                 &MessageSendEvent::SendRevokeAndACK { ref msg, .. } => {
6512                         msg.clone()
6513                 },
6514                 _ => panic!("Unexpected event"),
6515         };
6516
6517         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa_msg);
6518         check_added_monitors!(nodes[2], 1);
6519         assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
6520
6521         // nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process.
6522         let process_htlc_forwards_event = nodes[1].node.get_and_clear_pending_events();
6523         assert_eq!(process_htlc_forwards_event.len(), 1);
6524         match &process_htlc_forwards_event[0] {
6525                 &Event::PendingHTLCsForwardable { .. } => {},
6526                 _ => panic!("Unexpected event"),
6527         }
6528
6529         // In response, we call ChannelManager's process_pending_htlc_forwards
6530         nodes[1].node.process_pending_htlc_forwards();
6531         check_added_monitors!(nodes[1], 1);
6532
6533         // This causes the HTLC to be failed backwards.
6534         let fail_event = nodes[1].node.get_and_clear_pending_msg_events();
6535         assert_eq!(fail_event.len(), 1);
6536         let (fail_msg, commitment_signed) = match &fail_event[0] {
6537                 &MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
6538                         assert_eq!(updates.update_add_htlcs.len(), 0);
6539                         assert_eq!(updates.update_fulfill_htlcs.len(), 0);
6540                         assert_eq!(updates.update_fail_malformed_htlcs.len(), 0);
6541                         assert_eq!(updates.update_fail_htlcs.len(), 1);
6542                         (updates.update_fail_htlcs[0].clone(), updates.commitment_signed.clone())
6543                 },
6544                 _ => panic!("Unexpected event"),
6545         };
6546
6547         // Pass the failure messages back to nodes[0].
6548         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_msg);
6549         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
6550
6551         // Complete the HTLC failure+removal process.
6552         let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6553         check_added_monitors!(nodes[0], 1);
6554         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
6555         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed);
6556         check_added_monitors!(nodes[1], 2);
6557         let final_raa_event = nodes[1].node.get_and_clear_pending_msg_events();
6558         assert_eq!(final_raa_event.len(), 1);
6559         let raa = match &final_raa_event[0] {
6560                 &MessageSendEvent::SendRevokeAndACK { ref msg, .. } => msg.clone(),
6561                 _ => panic!("Unexpected event"),
6562         };
6563         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa);
6564         expect_payment_failure_chan_update!(nodes[0], chan_1_2.0.contents.short_channel_id, false);
6565         expect_payment_failed!(nodes[0], our_payment_hash, false);
6566         check_added_monitors!(nodes[0], 1);
6567 }
6568
6569 // BOLT 2 Requirements for the Sender when constructing and sending an update_add_htlc message.
6570 // BOLT 2 Requirement: MUST NOT offer amount_msat it cannot pay for in the remote commitment transaction at the current feerate_per_kw (see "Updating Fees") while maintaining its channel reserve.
6571 //TODO: I don't believe this is explicitly enforced when sending an HTLC, but as the fee aspect of the BOLT specs is in flux we leave this as a TODO.
6572
6573 #[test]
6574 fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() {
6575         //BOLT2 Requirement: MUST NOT offer amount_msat below the receiving node's htlc_minimum_msat (same validation check catches both of these)
6576         let chanmon_cfgs = create_chanmon_cfgs(2);
6577         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6578         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6579         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6580         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
6581
6582         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
6583         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6584         let logger = test_utils::TestLogger::new();
6585         let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
6586         route.paths[0][0].fee_msat = 100;
6587
6588         unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err },
6589                 assert!(regex::Regex::new(r"Cannot send less than their minimum HTLC value \(\d+\)").unwrap().is_match(err)));
6590         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6591         nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send less than their minimum HTLC value".to_string(), 1);
6592 }
6593
6594 #[test]
6595 fn test_update_add_htlc_bolt2_sender_zero_value_msat() {
6596         //BOLT2 Requirement: MUST offer amount_msat greater than 0.
6597         let chanmon_cfgs = create_chanmon_cfgs(2);
6598         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6599         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6600         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6601         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
6602         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
6603
6604         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6605         let logger = test_utils::TestLogger::new();
6606         let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
6607         route.paths[0][0].fee_msat = 0;
6608         unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err },
6609                 assert_eq!(err, "Cannot send 0-msat HTLC"));
6610
6611         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6612         nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send 0-msat HTLC".to_string(), 1);
6613 }
6614
6615 #[test]
6616 fn test_update_add_htlc_bolt2_receiver_zero_value_msat() {
6617         //BOLT2 Requirement: MUST offer amount_msat greater than 0.
6618         let chanmon_cfgs = create_chanmon_cfgs(2);
6619         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6620         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6621         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6622         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
6623
6624         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
6625         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6626         let logger = test_utils::TestLogger::new();
6627         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
6628         nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
6629         check_added_monitors!(nodes[0], 1);
6630         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
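             // Tamper with the outbound update_add_htlc so nodes[1] receives a 0-msat HTLC, which it
             // must reject by closing the channel.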
6631         updates.update_add_htlcs[0].amount_msat = 0;
6632
6633         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6634         nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote side tried to send a 0-msat HTLC".to_string(), 1);
6635         check_closed_broadcast!(nodes[1], true).unwrap();
6636         check_added_monitors!(nodes[1], 1);
6637 }
6638
6639 #[test]
6640 fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() {
6641         //BOLT 2 Requirement: MUST set cltv_expiry less than 500000000.
6642         //It is enforced when constructing a route.
6643         let chanmon_cfgs = create_chanmon_cfgs(2);
6644         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6645         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6646         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6647         let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0, InitFeatures::known(), InitFeatures::known());
6648         let logger = test_utils::TestLogger::new();
6649
6650         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
6651
6652         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6653         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000000, 500000001, &logger).unwrap();
6654         unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::RouteError { ref err },
6655                 assert_eq!(err, &"Channel CLTV overflowed?"));
6656 }
6657
6658 #[test]
6659 fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() {
6660         //BOLT 2 Requirement: if result would be offering more than the remote's max_accepted_htlcs HTLCs, in the remote commitment transaction: MUST NOT add an HTLC.
6661         //BOLT 2 Requirement: for the first HTLC it offers MUST set id to 0.
6662         //BOLT 2 Requirement: MUST increase the value of id by 1 for each successive offer.
6663         let chanmon_cfgs = create_chanmon_cfgs(2);
6664         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6665         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6666         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6667         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0, InitFeatures::known(), InitFeatures::known());
6668         let max_accepted_htlcs = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().counterparty_max_accepted_htlcs as u64;
6669
6670         let logger = test_utils::TestLogger::new();
6671         for i in 0..max_accepted_htlcs {
6672                 let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
6673                 let payment_event = {
6674                         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6675                         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
6676                         nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
6677                         check_added_monitors!(nodes[0], 1);
6678
6679                         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6680                         assert_eq!(events.len(), 1);
6681                         if let MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate{ update_add_htlcs: ref htlcs, .. }, } = events[0] {
6682                                 assert_eq!(htlcs[0].htlc_id, i);
6683                         } else {
6684                                 assert!(false);
6685                         }
6686                         SendEvent::from_event(events.remove(0))
6687                 };
6688                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
6689                 check_added_monitors!(nodes[1], 0);
6690                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
6691
6692                 expect_pending_htlcs_forwardable!(nodes[1]);
6693                 expect_payment_received!(nodes[1], our_payment_hash, our_payment_secret, 100000);
6694         }
6695         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
6696         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6697         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
6698         unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err },
6699                 assert!(regex::Regex::new(r"Cannot push more than their max accepted HTLCs \(\d+\)").unwrap().is_match(err)));
6700
6701         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6702         nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot push more than their max accepted HTLCs".to_string(), 1);
6703 }
6704
6705 #[test]
6706 fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() {
6707         //BOLT 2 Requirement: if the sum of total offered HTLCs would exceed the remote's max_htlc_value_in_flight_msat: MUST NOT add an HTLC.
6708         let chanmon_cfgs = create_chanmon_cfgs(2);
6709         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6710         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6711         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6712         let channel_value = 100000;
6713         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 0, InitFeatures::known(), InitFeatures::known());
6714         let max_in_flight = get_channel_value_stat!(nodes[0], chan.2).counterparty_max_htlc_value_in_flight_msat;
6715
6716         send_payment(&nodes[0], &vec!(&nodes[1])[..], max_in_flight);
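             // First show that exactly max_in_flight can be sent and claimed, so the failure below is
             // down to exceeding the in-flight limit rather than the amount itself.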
6717
6718         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
6719         // Manually create a route over our max in flight (which our router normally automatically
6720         // limits us to).
6721         let route = Route { paths: vec![vec![RouteHop {
6722            pubkey: nodes[1].node.get_our_node_id(), node_features: NodeFeatures::known(), channel_features: ChannelFeatures::known(),
6723            short_channel_id: nodes[1].node.list_usable_channels()[0].short_channel_id.unwrap(),
6724            fee_msat: max_in_flight + 1, cltv_expiry_delta: TEST_FINAL_CLTV
6725         }]] };
6726         unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err },
6727                 assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err)));
6728
6729         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
6730         nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us over the max HTLC value in flight our peer will accept".to_string(), 1);
6731
6732         send_payment(&nodes[0], &[&nodes[1]], max_in_flight);
6733 }
6734
6735 // BOLT 2 Requirements for the Receiver when handling an update_add_htlc message.
6736 #[test]
6737 fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
6738         //BOLT2 Requirement: receiving an amount_msat equal to 0, OR less than its own htlc_minimum_msat -> SHOULD fail the channel.
6739         let chanmon_cfgs = create_chanmon_cfgs(2);
6740         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6741         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6742         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6743         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
6744         let htlc_minimum_msat: u64;
6745         {
6746                 let chan_lock = nodes[0].node.channel_state.lock().unwrap();
6747                 let channel = chan_lock.by_id.get(&chan.2).unwrap();
6748                 htlc_minimum_msat = channel.get_holder_htlc_minimum_msat();
6749         }
6750
6751         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
6752         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6753         let logger = test_utils::TestLogger::new();
6754         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], htlc_minimum_msat, TEST_FINAL_CLTV, &logger).unwrap();
6755         nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
6756         check_added_monitors!(nodes[0], 1);
6757         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6758         updates.update_add_htlcs[0].amount_msat = htlc_minimum_msat-1;
6759         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6760         assert!(nodes[1].node.list_channels().is_empty());
6761         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6762         assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
6763         check_added_monitors!(nodes[1], 1);
6764 }
6765
6766 #[test]
6767 fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() {
6768         //BOLT2 Requirement: receiving an amount_msat that the sending node cannot afford at the current feerate_per_kw (while maintaining its channel reserve): SHOULD fail the channel
6769         let chanmon_cfgs = create_chanmon_cfgs(2);
6770         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6771         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6772         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6773         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
6774         let logger = test_utils::TestLogger::new();
6775
6776         let chan_stat = get_channel_value_stat!(nodes[0], chan.2);
6777         let channel_reserve = chan_stat.channel_reserve_msat;
6778         let feerate = get_feerate!(nodes[0], chan.2);
6779         // The 2* and +1 are for the fee spike reserve.
6780         let commit_tx_fee_outbound = 2 * commit_tx_fee_msat(feerate, 1 + 1);
6781
6782         let max_can_send = 5000000 - channel_reserve - commit_tx_fee_outbound;
6783         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
6784         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6785         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], max_can_send, TEST_FINAL_CLTV, &logger).unwrap();
6786         nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
6787         check_added_monitors!(nodes[0], 1);
6788         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6789
6790         // Even though channel-initiator senders are required to respect the fee_spike_reserve,
6791         // at this time channel-initiatee receivers are not required to enforce that senders
6792         // respect the fee_spike_reserve.
6793         updates.update_add_htlcs[0].amount_msat = max_can_send + commit_tx_fee_outbound + 1;
6794         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6795
6796         assert!(nodes[1].node.list_channels().is_empty());
6797         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6798         assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
6799         check_added_monitors!(nodes[1], 1);
6800 }
6801
6802 #[test]
6803 fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
6804         //BOLT 2 Requirement: if a sending node adds more than its max_accepted_htlcs HTLCs to its local commitment transaction: SHOULD fail the channel
6805         //BOLT 2 Requirement: MUST allow multiple HTLCs with the same payment_hash.
6806         let chanmon_cfgs = create_chanmon_cfgs(2);
6807         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6808         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6809         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6810         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
6811         let logger = test_utils::TestLogger::new();
6812
6813         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
6814         let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
6815
6816         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6817         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 3999999, TEST_FINAL_CLTV, &logger).unwrap();
6818
6819         let cur_height = nodes[0].node.best_block.read().unwrap().height() + 1;
6820         let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::signing_only(), &route.paths[0], &session_priv).unwrap();
6821         let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], 3999999, &Some(our_payment_secret), cur_height, &None).unwrap();
6822         let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash);
6823
6824         let mut msg = msgs::UpdateAddHTLC {
6825                 channel_id: chan.2,
6826                 htlc_id: 0,
6827                 amount_msat: 1000,
6828                 payment_hash: our_payment_hash,
6829                 cltv_expiry: htlc_cltv,
6830                 onion_routing_packet: onion_packet.clone(),
6831         };
6832
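             // Feed nodes[1] hand-built update_add_htlc messages (rather than using send_payment) so we
             // can push one HTLC more than its OUR_MAX_HTLCS limit and trip the receiver-side check.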
6833         for i in 0..super::channel::OUR_MAX_HTLCS {
6834                 msg.htlc_id = i as u64;
6835                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
6836         }
6837         msg.htlc_id = (super::channel::OUR_MAX_HTLCS) as u64;
6838         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
6839
6840         assert!(nodes[1].node.list_channels().is_empty());
6841         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6842         assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
6843         check_added_monitors!(nodes[1], 1);
6844 }
6845
6846 #[test]
6847 fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() {
6848         //OR adds more than its max_htlc_value_in_flight_msat worth of offered HTLCs to its local commitment transaction: SHOULD fail the channel
6849         let chanmon_cfgs = create_chanmon_cfgs(2);
6850         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6851         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6852         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6853         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
6854         let logger = test_utils::TestLogger::new();
6855
6856         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
6857         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6858         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 1000000, TEST_FINAL_CLTV, &logger).unwrap();
6859         nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
6860         check_added_monitors!(nodes[0], 1);
6861         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6862         updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], chan.2).counterparty_max_htlc_value_in_flight_msat + 1;
6863         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6864
6865         assert!(nodes[1].node.list_channels().is_empty());
6866         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6867         assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str()));
6868         check_added_monitors!(nodes[1], 1);
6869 }
6870
6871 #[test]
6872 fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() {
6873         //BOLT2 Requirement: if sending node sets cltv_expiry to greater or equal to 500000000: SHOULD fail the channel.
6874         let chanmon_cfgs = create_chanmon_cfgs(2);
6875         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6876         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6877         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6878         let logger = test_utils::TestLogger::new();
6879
6880         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
6881         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
6882         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6883         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 1000000, TEST_FINAL_CLTV, &logger).unwrap();
6884         nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
6885         check_added_monitors!(nodes[0], 1);
6886         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6887         updates.update_add_htlcs[0].cltv_expiry = 500000000;
6888         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6889
6890         assert!(nodes[1].node.list_channels().is_empty());
6891         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6892         assert_eq!(err_msg.data,"Remote provided CLTV expiry in seconds instead of block height");
6893         check_added_monitors!(nodes[1], 1);
6894 }
6895
6896 #[test]
6897 fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
6898         //BOLT 2 requirement: if the sender did not previously acknowledge the commitment of that HTLC: MUST ignore a repeated id value after a reconnection.
6899         // We test this by first checking that repeated HTLCs pass commitment signature checks
6900         // after a disconnect, and then that non-sequential htlc_ids result in a channel failure.
6901         let chanmon_cfgs = create_chanmon_cfgs(2);
6902         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6903         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6904         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6905         let logger = test_utils::TestLogger::new();
6906
6907         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
6908         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
6909         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6910         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 1000000, TEST_FINAL_CLTV, &logger).unwrap();
6911         nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
6912         check_added_monitors!(nodes[0], 1);
6913         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6914         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6915
6916         //Disconnect and Reconnect
6917         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
6918         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
6919         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
6920         let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
6921         assert_eq!(reestablish_1.len(), 1);
6922         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
6923         let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
6924         assert_eq!(reestablish_2.len(), 1);
6925         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
6926         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
6927         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
6928         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
6929
6930         //Resend HTLC
6931         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6932         assert_eq!(updates.commitment_signed.htlc_signatures.len(), 1);
6933         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed);
6934         check_added_monitors!(nodes[1], 1);
6935         let _bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
6936
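             // Replay the same update_add_htlc a second time, now that node 1 has already sent its
             // commitment for it; the repeated (non-sequential) htlc_id must fail the channel.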
6937         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6938
6939         assert!(nodes[1].node.list_channels().is_empty());
6940         let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
6941         assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str()));
6942         check_added_monitors!(nodes[1], 1);
6943 }
6944
6945 #[test]
6946 fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
6947         //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions:     MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
6948
6949         let chanmon_cfgs = create_chanmon_cfgs(2);
6950         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6951         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6952         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6953         let logger = test_utils::TestLogger::new();
6954         let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
6955         let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
6956         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6957         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 1000000, TEST_FINAL_CLTV, &logger).unwrap();
6958         nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
6959
6960         check_added_monitors!(nodes[0], 1);
6961         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6962         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6963
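             // Forge node 1's update_fulfill_htlc for an HTLC that has not yet been irrevocably
             // committed on both sides; when node 0 handles it, it must fail the channel.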
6964         let update_msg = msgs::UpdateFulfillHTLC{
6965                 channel_id: chan.2,
6966                 htlc_id: 0,
6967                 payment_preimage: our_payment_preimage,
6968         };
6969
6970         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
6971
6972         assert!(nodes[0].node.list_channels().is_empty());
6973         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
6974         assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
6975         check_added_monitors!(nodes[0], 1);
6976 }
6977
6978 #[test]
6979 fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
6980         //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions:     MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
6981
6982         let chanmon_cfgs = create_chanmon_cfgs(2);
6983         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6984         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6985         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6986         let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
6987         let logger = test_utils::TestLogger::new();
6988
6989         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
6990         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
6991         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 1000000, TEST_FINAL_CLTV, &logger).unwrap();
6992         nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
6993         check_added_monitors!(nodes[0], 1);
6994         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
6995         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
6996
6997         let update_msg = msgs::UpdateFailHTLC{
6998                 channel_id: chan.2,
6999                 htlc_id: 0,
7000                 reason: msgs::OnionErrorPacket { data: Vec::new()},
7001         };
7002
7003         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
7004
7005         assert!(nodes[0].node.list_channels().is_empty());
7006         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
7007         assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
7008         check_added_monitors!(nodes[0], 1);
7009 }
7010
7011 #[test]
7012 fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment() {
7013         //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions:     MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
7014
7015         let chanmon_cfgs = create_chanmon_cfgs(2);
7016         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7017         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7018         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7019         let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
7020         let logger = test_utils::TestLogger::new();
7021
7022         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
7023         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
7024         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 1000000, TEST_FINAL_CLTV, &logger).unwrap();
7025         nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
7026         check_added_monitors!(nodes[0], 1);
7027         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
7028         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
7029         let update_msg = msgs::UpdateFailMalformedHTLC{
7030                 channel_id: chan.2,
7031                 htlc_id: 0,
7032                 sha256_of_onion: [1; 32],
7033                 failure_code: 0x8000,
7034         };
7035
7036         nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
7037
7038         assert!(nodes[0].node.list_channels().is_empty());
7039         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
7040         assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
7041         check_added_monitors!(nodes[0], 1);
7042 }
7043
7044 #[test]
7045 fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
7046         //BOLT 2 Requirement: A receiving node: if the id does not correspond to an HTLC in its current commitment transaction MUST fail the channel.
7047
7048         let chanmon_cfgs = create_chanmon_cfgs(2);
7049         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7050         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7051         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7052         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
7053
7054         let our_payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 100000).0;
7055
7056         nodes[1].node.claim_funds(our_payment_preimage);
7057         check_added_monitors!(nodes[1], 1);
7058
7059         let events = nodes[1].node.get_and_clear_pending_msg_events();
7060         assert_eq!(events.len(), 1);
7061         let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = {
7062                 match events[0] {
7063                         MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
7064                                 assert!(update_add_htlcs.is_empty());
7065                                 assert_eq!(update_fulfill_htlcs.len(), 1);
7066                                 assert!(update_fail_htlcs.is_empty());
7067                                 assert!(update_fail_malformed_htlcs.is_empty());
7068                                 assert!(update_fee.is_none());
7069                                 update_fulfill_htlcs[0].clone()
7070                         },
7071                         _ => panic!("Unexpected event"),
7072                 }
7073         };
7074
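             // Point the fulfill at htlc_id 1, which does not correspond to any HTLC in node 0's
             // current commitment transaction (only id 0 exists), so node 0 must fail the channel.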
7075         update_fulfill_msg.htlc_id = 1;
7076
7077         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);
7078
7079         assert!(nodes[0].node.list_channels().is_empty());
7080         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
7081         assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find");
7082         check_added_monitors!(nodes[0], 1);
7083 }
7084
7085 #[test]
7086 fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
7087         //BOLT 2 Requirement: A receiving node: if the payment_preimage value in update_fulfill_htlc doesn't SHA256 hash to the corresponding HTLC payment_hash MUST fail the channel.
7088
7089         let chanmon_cfgs = create_chanmon_cfgs(2);
7090         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7091         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7092         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7093         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
7094
7095         let our_payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 100000).0;
7096
7097         nodes[1].node.claim_funds(our_payment_preimage);
7098         check_added_monitors!(nodes[1], 1);
7099
7100         let events = nodes[1].node.get_and_clear_pending_msg_events();
7101         assert_eq!(events.len(), 1);
7102         let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = {
7103                 match events[0] {
7104                         MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
7105                                 assert!(update_add_htlcs.is_empty());
7106                                 assert_eq!(update_fulfill_htlcs.len(), 1);
7107                                 assert!(update_fail_htlcs.is_empty());
7108                                 assert!(update_fail_malformed_htlcs.is_empty());
7109                                 assert!(update_fee.is_none());
7110                                 update_fulfill_htlcs[0].clone()
7111                         },
7112                         _ => panic!("Unexpected event"),
7113                 }
7114         };
7115
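             // Swap in a bogus preimage; conceptually the receiver's check is (illustrative sketch with
             // made-up binding names, not the exact library code):
             //   Sha256::hash(&update_fulfill_msg.payment_preimage.0)[..] != expected_payment_hash.0[..]
             // so node 0 must fail the channel on a preimage that doesn't hash to the HTLC's payment_hash.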
7116         update_fulfill_msg.payment_preimage = PaymentPreimage([1; 32]);
7117
7118         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);
7119
7120         assert!(nodes[0].node.list_channels().is_empty());
7121         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
7122         assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str()));
7123         check_added_monitors!(nodes[0], 1);
7124 }
7125
7126 #[test]
7127 fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_message() {
7128         //BOLT 2 Requirement: A receiving node: if the BADONION bit in failure_code is not set for update_fail_malformed_htlc MUST fail the channel.
7129
7130         let chanmon_cfgs = create_chanmon_cfgs(2);
7131         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7132         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7133         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7134         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
7135         let logger = test_utils::TestLogger::new();
7136
7137         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
7138         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
7139         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 1000000, TEST_FINAL_CLTV, &logger).unwrap();
7140         nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
7141         check_added_monitors!(nodes[0], 1);
7142
7143         let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
7144         updates.update_add_htlcs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message
7145
7146         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
7147         check_added_monitors!(nodes[1], 0);
7148         commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false, true);
7149
7150         let events = nodes[1].node.get_and_clear_pending_msg_events();
7151
7152         let mut update_msg: msgs::UpdateFailMalformedHTLC = {
7153                 match events[0] {
7154                         MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
7155                                 assert!(update_add_htlcs.is_empty());
7156                                 assert!(update_fulfill_htlcs.is_empty());
7157                                 assert!(update_fail_htlcs.is_empty());
7158                                 assert_eq!(update_fail_malformed_htlcs.len(), 1);
7159                                 assert!(update_fee.is_none());
7160                                 update_fail_malformed_htlcs[0].clone()
7161                         },
7162                         _ => panic!("Unexpected event"),
7163                 }
7164         };
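             // 0x8000 is the BADONION bit; clearing it makes the update_fail_malformed_htlc invalid,
             // and node 0 must fail the channel when it sees BADONION unset.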
7165         update_msg.failure_code &= !0x8000;
7166         nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
7167
7168         assert!(nodes[0].node.list_channels().is_empty());
7169         let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
7170         assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set");
7171         check_added_monitors!(nodes[0], 1);
7172 }
7173
7174 #[test]
7175 fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_update_fail_htlc() {
7176         //BOLT 2 Requirement: a receiving node which has an outgoing HTLC canceled by update_fail_malformed_htlc:
7177         //    * MUST return an error in the update_fail_htlc sent to the link which originally sent the HTLC, using the failure_code given and setting the data to sha256_of_onion.
7178
7179         let chanmon_cfgs = create_chanmon_cfgs(3);
7180         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
7181         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
7182         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
7183         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
7184         create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
7185         let logger = test_utils::TestLogger::new();
7186
7187         let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]);
7188
7189         //First hop
7190         let mut payment_event = {
7191                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
7192                 let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100000, TEST_FINAL_CLTV, &logger).unwrap();
7193                 nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
7194                 check_added_monitors!(nodes[0], 1);
7195                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
7196                 assert_eq!(events.len(), 1);
7197                 SendEvent::from_event(events.remove(0))
7198         };
7199         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
7200         check_added_monitors!(nodes[1], 0);
7201         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
7202         expect_pending_htlcs_forwardable!(nodes[1]);
7203         let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
7204         assert_eq!(events_2.len(), 1);
7205         check_added_monitors!(nodes[1], 1);
7206         payment_event = SendEvent::from_event(events_2.remove(0));
7207         assert_eq!(payment_event.msgs.len(), 1);
7208
7209         //Second Hop
7210         payment_event.msgs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message
7211         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
7212         check_added_monitors!(nodes[2], 0);
7213         commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true);
7214
7215         let events_3 = nodes[2].node.get_and_clear_pending_msg_events();
7216         assert_eq!(events_3.len(), 1);
7217         let update_msg : (msgs::UpdateFailMalformedHTLC, msgs::CommitmentSigned) = {
7218                 match events_3[0] {
7219                         MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
7220                                 assert!(update_add_htlcs.is_empty());
7221                                 assert!(update_fulfill_htlcs.is_empty());
7222                                 assert!(update_fail_htlcs.is_empty());
7223                                 assert_eq!(update_fail_malformed_htlcs.len(), 1);
7224                                 assert!(update_fee.is_none());
7225                                 (update_fail_malformed_htlcs[0].clone(), commitment_signed.clone())
7226                         },
7227                         _ => panic!("Unexpected event"),
7228                 }
7229         };
7230
7231         nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), &update_msg.0);
7232
7233         check_added_monitors!(nodes[1], 0);
7234         commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true);
7235         expect_pending_htlcs_forwardable!(nodes[1]);
7236         let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
7237         assert_eq!(events_4.len(), 1);
7238
7239         //Confirm that handling the update_fail_malformed_htlc message produces an update_fail_htlc message to be forwarded back along the route
7240         match events_4[0] {
7241                 MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
7242                         assert!(update_add_htlcs.is_empty());
7243                         assert!(update_fulfill_htlcs.is_empty());
7244                         assert_eq!(update_fail_htlcs.len(), 1);
7245                         assert!(update_fail_malformed_htlcs.is_empty());
7246                         assert!(update_fee.is_none());
7247                 },
7248                 _ => panic!("Unexpected event"),
7249         };
7250
7251         check_added_monitors!(nodes[1], 1);
7252 }
7253
7254 fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
7255         // Dust-HTLC failure updates must be delayed until the failure-trigger tx (in this case the local commitment tx) reaches ANTI_REORG_DELAY.
7256         // We can have at most two valid local commitment txs, so both cases must be covered, and both txs must be checked to catch every HTLC, as
7257         // an HTLC may have been removed from the latest local commitment tx but remain valid until we receive the remote RAA.
7258
7259         let mut chanmon_cfgs = create_chanmon_cfgs(2);
7260         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
7261         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7262         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7263         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7264         let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
7265
7266         let bs_dust_limit = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis;
7267
7268         // We route 2 dust-HTLCs between A and B
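             // (bs_dust_limit is in satoshis while route_payment takes msats, hence the * 1000;
             // these HTLCs are dust from B's point of view and get no output on the commitment tx.)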
7269         let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
7270         let (_, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
7271         route_payment(&nodes[0], &[&nodes[1]], 1000000);
7272
7273         // Cache one local commitment tx as previous
7274         let as_prev_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
7275
7276         // Fail one HTLC to prune it in the will-be-latest-local commitment tx
7277         assert!(nodes[1].node.fail_htlc_backwards(&payment_hash_2));
7278         check_added_monitors!(nodes[1], 0);
7279         expect_pending_htlcs_forwardable!(nodes[1]);
7280         check_added_monitors!(nodes[1], 1);
7281
7282         let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
7283         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &remove.update_fail_htlcs[0]);
7284         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &remove.commitment_signed);
7285         check_added_monitors!(nodes[0], 1);
7286
7287         // Cache one local commitment tx as latest
7288         let as_last_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
7289
7290         let events = nodes[0].node.get_and_clear_pending_msg_events();
7291         match events[0] {
7292                 MessageSendEvent::SendRevokeAndACK { node_id, .. } => {
7293                         assert_eq!(node_id, nodes[1].node.get_our_node_id());
7294                 },
7295                 _ => panic!("Unexpected event"),
7296         }
7297         match events[1] {
7298                 MessageSendEvent::UpdateHTLCs { node_id, .. } => {
7299                         assert_eq!(node_id, nodes[1].node.get_our_node_id());
7300                 },
7301                 _ => panic!("Unexpected event"),
7302         }
7303
7304         assert_ne!(as_prev_commitment_tx, as_last_commitment_tx);
7305         // Fail the 2 dust-HTLCs, moving their failure into the maturation buffer (htlc_updated_waiting_threshold_conf)
7306         if announce_latest {
7307                 mine_transaction(&nodes[0], &as_last_commitment_tx[0]);
7308         } else {
7309                 mine_transaction(&nodes[0], &as_prev_commitment_tx[0]);
7310         }
7311
7312         check_closed_broadcast!(nodes[0], true);
7313         check_added_monitors!(nodes[0], 1);
7314
7315         assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
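             // Dust-HTLC failures are only surfaced once the commitment tx has ANTI_REORG_DELAY
             // confirmations; the block that mined it counts as the first, so connect the rest here.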
7316         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7317         let events = nodes[0].node.get_and_clear_pending_events();
7318         // Only 2 PaymentFailed events should show up; the over-dust HTLC has to be failed by the timeout tx
7319         assert_eq!(events.len(), 2);
7320         let mut first_failed = false;
7321         for event in events {
7322                 match event {
7323                         Event::PaymentFailed { payment_hash, .. } => {
7324                                 if payment_hash == payment_hash_1 {
7325                                         assert!(!first_failed);
7326                                         first_failed = true;
7327                                 } else {
7328                                         assert_eq!(payment_hash, payment_hash_2);
7329                                 }
7330                         }
7331                         _ => panic!("Unexpected event"),
7332                 }
7333         }
7334 }
7335
7336 #[test]
7337 fn test_failure_delay_dust_htlc_local_commitment() {
7338         do_test_failure_delay_dust_htlc_local_commitment(true);
7339         do_test_failure_delay_dust_htlc_local_commitment(false);
7340 }
7341
7342 fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
7343         // Outbound HTLC-failure updates must be cancelled if we get a reorg before we reach ANTI_REORG_DELAY.
7344         // Broadcast of revoked remote commitment tx, trigger failure-update of dust/non-dust HTLCs
7345         // Broadcast of remote commitment tx, trigger failure-update of dust-HTLCs
7346         // Broadcast of timeout tx on remote commitment tx, trigger failure-update of non-dust HTLCs
7347         // Broadcast of local commitment tx, trigger failure-update of dust-HTLCs
7348         // Broadcast of HTLC-timeout tx on local commitment tx, trigger failure-update of non-dust HTLCs
7349
7350         let chanmon_cfgs = create_chanmon_cfgs(3);
7351         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
7352         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
7353         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
7354         let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
7355
7356         let bs_dust_limit = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis;
7357
7358         let (_payment_preimage_1, dust_hash, _payment_secret_1) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
7359         let (_payment_preimage_2, non_dust_hash, _payment_secret_2) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
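             // dust_hash rides an HTLC at B's dust limit (no commitment output to claim), while
             // non_dust_hash is well above it and therefore has an on-chain HTLC output and timeout path.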
7360
7361         let as_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
7362         let bs_commitment_tx = get_local_commitment_txn!(nodes[1], chan.2);
7363
7364         // We revoked bs_commitment_tx
7365         if revoked {
7366                 let (payment_preimage_3, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
7367                 claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
7368         }
7369
7370         let mut timeout_tx = Vec::new();
7371         if local {
7372                 // We fail dust-HTLC 1 by broadcast of local commitment tx
7373                 mine_transaction(&nodes[0], &as_commitment_tx[0]);
7374                 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7375                 expect_payment_failed!(nodes[0], dust_hash, true);
7376
7377                 connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS - ANTI_REORG_DELAY);
7378                 check_closed_broadcast!(nodes[0], true);
7379                 check_added_monitors!(nodes[0], 1);
7380                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7381                 timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[1].clone());
7382                 assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
7383                 // We fail non-dust-HTLC 2 by broadcast of local HTLC-timeout tx on local commitment tx
7384                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7385                 mine_transaction(&nodes[0], &timeout_tx[0]);
7386                 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7387                 expect_payment_failed!(nodes[0], non_dust_hash, true);
7388         } else {
7389                 // We fail dust-HTLC 1 by broadcast of remote commitment tx. If revoked, fail also non-dust HTLC
7390                 mine_transaction(&nodes[0], &bs_commitment_tx[0]);
7391                 check_closed_broadcast!(nodes[0], true);
7392                 check_added_monitors!(nodes[0], 1);
7393                 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7394                 connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
7395                 timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[1].clone());
7396                 if !revoked {
7397                         expect_payment_failed!(nodes[0], dust_hash, true);
7398                         assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
7399                         // We fail non-dust-HTLC 2 by broadcast of local timeout tx on remote commitment tx
7400                         mine_transaction(&nodes[0], &timeout_tx[0]);
7401                         assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
7402                         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7403                         expect_payment_failed!(nodes[0], non_dust_hash, true);
7404                 } else {
7405                         // If revoked, both dust & non-dust HTLCs should have been failed after ANTI_REORG_DELAY confs of revoked
7406                         // commitment tx
7407                         let events = nodes[0].node.get_and_clear_pending_events();
7408                         assert_eq!(events.len(), 2);
7409                         let first;
7410                         match events[0] {
7411                                 Event::PaymentFailed { payment_hash, .. } => {
7412                                         if payment_hash == dust_hash { first = true; }
7413                                         else { first = false; }
7414                                 },
7415                                 _ => panic!("Unexpected event"),
7416                         }
7417                         match events[1] {
7418                                 Event::PaymentFailed { payment_hash, .. } => {
7419                                         if first { assert_eq!(payment_hash, non_dust_hash); }
7420                                         else { assert_eq!(payment_hash, dust_hash); }
7421                                 },
7422                                 _ => panic!("Unexpected event"),
7423                         }
7424                 }
7425         }
7426 }
7427
7428 #[test]
7429 fn test_sweep_outbound_htlc_failure_update() {
7430         do_test_sweep_outbound_htlc_failure_update(false, true);
7431         do_test_sweep_outbound_htlc_failure_update(false, false);
7432         do_test_sweep_outbound_htlc_failure_update(true, false);
7433 }
7434
7435 #[test]
7436 fn test_upfront_shutdown_script() {
7437         // BOLT 2: option_upfront_shutdown_script - if a peer commits to its closing script at channel opening,
7438         // enforce it when the shutdown message arrives.
7439
7440         let mut config = UserConfig::default();
7441         config.channel_options.announced_channel = true;
7442         config.peer_channel_config_limits.force_announced_channel_preference = false;
7443         config.channel_options.commit_upfront_shutdown_pubkey = false;
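             // With commit_upfront_shutdown_pubkey disabled, node 1 sends a zero-length upfront
             // shutdown script, i.e. it opts out of committing to one at open time.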
7444         let user_cfgs = [None, Some(config), None];
7445         let chanmon_cfgs = create_chanmon_cfgs(3);
7446         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
7447         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs);
7448         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
7449
7450         // We test that if a peer commits upfront to a script and it changes at closing, we refuse to sign
7451         let flags = InitFeatures::known();
7452         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000, flags.clone(), flags.clone());
7453         nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
7454         let mut node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id());
7455         node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
7456         // Test that we enforce the upfront_scriptpubkey: providing a different one at closing makes us disconnect the peer
7457         nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
7458         assert!(regex::Regex::new(r"Got shutdown request with a scriptpubkey \([A-Fa-f0-9]+\) which did not match their previous scriptpubkey.").unwrap().is_match(check_closed_broadcast!(nodes[2], true).unwrap().data.as_str()));
7459         check_added_monitors!(nodes[2], 1);
7460
7461         // We test that in case of peer committing upfront to a script, if it doesn't change at closing, we sign
7462         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000, flags.clone(), flags.clone());
7463         nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
7464         let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id());
7465         // We test that in case of peer committing upfront to a script, if it doesn't change at closing, we sign
7466         nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
7467         let events = nodes[2].node.get_and_clear_pending_msg_events();
7468         assert_eq!(events.len(), 1);
7469         match events[0] {
7470                 MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[0].node.get_our_node_id()) }
7471                 _ => panic!("Unexpected event"),
7472         }
7473
7474         // We test that in case of a non-signaling peer we don't enforce the committed script at channel opening
7475         let flags_no = InitFeatures::known().clear_upfront_shutdown_script();
7476         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, flags_no, flags.clone());
7477         nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
7478         let mut node_1_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
7479         node_1_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
7480         nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
7481         let events = nodes[1].node.get_and_clear_pending_msg_events();
7482         assert_eq!(events.len(), 1);
7483         match events[0] {
7484                 MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[0].node.get_our_node_id()) }
7485                 _ => panic!("Unexpected event"),
7486         }
7487
7488         // We test that if the user opts out, we provide a zero-length script at channel opening and we are able to close
7489         // the channel smoothly; the opt-out is from the channel initiator here
7490         let chan = create_announced_chan_between_nodes_with_value(&nodes, 1, 0, 1000000, 1000000, flags.clone(), flags.clone());
7491         nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
7492         let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
7493         node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
7494         nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
7495         let events = nodes[0].node.get_and_clear_pending_msg_events();
7496         assert_eq!(events.len(), 1);
7497         match events[0] {
7498                 MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
7499                 _ => panic!("Unexpected event"),
7500         }
7501
7502         // We test that if the user opts out, we provide a zero-length script at channel opening and we are able to close
7503         // the channel smoothly
7504         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, flags.clone(), flags.clone());
7505         nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
7506         let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
7507         node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
7508         nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
7509         let events = nodes[0].node.get_and_clear_pending_msg_events();
7510         assert_eq!(events.len(), 2);
7511         match events[0] {
7512                 MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
7513                 _ => panic!("Unexpected event"),
7514         }
7515         match events[1] {
7516                 MessageSendEvent::SendClosingSigned { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
7517                 _ => panic!("Unexpected event"),
7518         }
7519 }
7520
7521 #[test]
7522 fn test_upfront_shutdown_script_unsupport_segwit() {
7523         // We test that the channel is closed early
7524         // if a segwit program is passed as the upfront shutdown script,
7525         // but the peer does not support segwit.
7526         let chanmon_cfgs = create_chanmon_cfgs(2);
7527         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7528         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7529         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7530
7531         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
7532
7533         let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
7534         open_channel.shutdown_scriptpubkey = Present(Builder::new().push_int(16)
7535                 .push_slice(&[0, 0])
7536                 .into_script());
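             // The injected script is a version-16 witness program; the handler below is given
             // InitFeatures with opt_shutdown_anysegwit cleared, so this format must be rejected.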
7537
7538         let features = InitFeatures::known().clear_shutdown_anysegwit();
7539         nodes[0].node.handle_open_channel(&nodes[0].node.get_our_node_id(), features, &open_channel);
7540
7541         let events = nodes[0].node.get_and_clear_pending_msg_events();
7542         assert_eq!(events.len(), 1);
7543         match events[0] {
7544                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
7545                         assert_eq!(node_id, nodes[0].node.get_our_node_id());
7546                         assert!(regex::Regex::new(r"Peer is signaling upfront_shutdown but has provided a non-accepted scriptpubkey format. script: (\([A-Fa-f0-9]+\))").unwrap().is_match(&*msg.data));
7547                 },
7548                 _ => panic!("Unexpected event"),
7549         }
7550 }
7551
7552 #[test]
7553 fn test_shutdown_script_any_segwit_allowed() {
7554         let mut config = UserConfig::default();
7555         config.channel_options.announced_channel = true;
7556         config.peer_channel_config_limits.force_announced_channel_preference = false;
7557         config.channel_options.commit_upfront_shutdown_pubkey = false;
7558         let user_cfgs = [None, Some(config), None];
7559         let chanmon_cfgs = create_chanmon_cfgs(3);
7560         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
7561         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs);
7562         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
7563
7564         // We test that if the remote peer accepts opt_shutdown_anysegwit, a witness program can be used on shutdown
7565         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
7566         nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
7567         let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
7568         node_0_shutdown.scriptpubkey = Builder::new().push_int(16)
7569                 .push_slice(&[0, 0])
7570                 .into_script();
7571         nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
7572         let events = nodes[0].node.get_and_clear_pending_msg_events();
7573         assert_eq!(events.len(), 2);
7574         match events[0] {
7575                 MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
7576                 _ => panic!("Unexpected event"),
7577         }
7578         match events[1] {
7579                 MessageSendEvent::SendClosingSigned { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
7580                 _ => panic!("Unexpected event"),
7581         }
7582 }
7583
7584 #[test]
7585 fn test_shutdown_script_any_segwit_not_allowed() {
7586         let mut config = UserConfig::default();
7587         config.channel_options.announced_channel = true;
7588         config.peer_channel_config_limits.force_announced_channel_preference = false;
7589         config.channel_options.commit_upfront_shutdown_pubkey = false;
7590         let user_cfgs = [None, Some(config), None];
7591         let chanmon_cfgs = create_chanmon_cfgs(3);
7592         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
7593         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs);
7594         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
7595
7596         // We test that if the remote peer does not accept opt_shutdown_anysegwit, a witness program cannot be used on shutdown
7597         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
7598         nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
7599         let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
7600         // Make a script using an arbitrary non-zero segwit version (v16)
7601         node_0_shutdown.scriptpubkey = Builder::new().push_int(16)
7602                 .push_slice(&[0, 0])
7603                 .into_script();
7604         let flags_no = InitFeatures::known().clear_shutdown_anysegwit();
7605         nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &flags_no, &node_0_shutdown);
7606         let events = nodes[0].node.get_and_clear_pending_msg_events();
7607         assert_eq!(events.len(), 2);
7608         match events[1] {
7609                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
7610                         assert_eq!(node_id, nodes[1].node.get_our_node_id());
7611                         assert_eq!(msg.data, "Got a nonstandard scriptpubkey (60020000) from remote peer".to_owned())
7612                 },
7613                 _ => panic!("Unexpected event"),
7614         }
7615         check_added_monitors!(nodes[0], 1);
7616 }
7617
7618 #[test]
7619 fn test_shutdown_script_segwit_but_not_anysegwit() {
7620         let mut config = UserConfig::default();
7621         config.channel_options.announced_channel = true;
7622         config.peer_channel_config_limits.force_announced_channel_preference = false;
7623         config.channel_options.commit_upfront_shutdown_pubkey = false;
7624         let user_cfgs = [None, Some(config), None];
7625         let chanmon_cfgs = create_chanmon_cfgs(3);
7626         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
7627         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs);
7628         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
7629
7630         // We test that even when opt_shutdown_anysegwit is supported, this nonstandard version-0 witness script is not accepted
7631         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
7632         nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
7633         let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
7634         // Make a segwit script that is not valid as any segwit
7635         node_0_shutdown.scriptpubkey = Builder::new().push_int(0)
7636                 .push_slice(&[0, 0])
7637                 .into_script();
7638         nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
7639         let events = nodes[0].node.get_and_clear_pending_msg_events();
7640         assert_eq!(events.len(), 2);
7641         match events[1] {
7642                 MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
7643                         assert_eq!(node_id, nodes[1].node.get_our_node_id());
7644                         assert_eq!(msg.data, "Got a nonstandard scriptpubkey (00020000) from remote peer".to_owned())
7645                 },
7646                 _ => panic!("Unexpected event"),
7647         }
7648         check_added_monitors!(nodes[0], 1);
7649 }
7650
7651 #[test]
7652 fn test_user_configurable_csv_delay() {
7653         // We test that our channel constructors yield errors when we pass them an absurd csv delay
7654
7655         let mut low_our_to_self_config = UserConfig::default();
7656         low_our_to_self_config.own_channel_config.our_to_self_delay = 6;
7657         let mut high_their_to_self_config = UserConfig::default();
7658         high_their_to_self_config.peer_channel_config_limits.their_to_self_delay = 100;
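             // low_our_to_self_config asks for a to_self_delay shorter than BREAKDOWN_TIMEOUT (unsafe for our
             // own funds); high_their_to_self_config caps the delay we will tolerate from the peer at 100 blocks.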
7659         let user_cfgs = [Some(high_their_to_self_config.clone()), None];
7660         let chanmon_cfgs = create_chanmon_cfgs(2);
7661         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7662         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
7663         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7664
7665         // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in Channel::new_outbound()
7666         if let Err(error) = Channel::new_outbound(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), 1000000, 1000000, 0, &low_our_to_self_config) {
7667                 match error {
7668                         APIError::APIMisuseError { err } => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); },
7669                         _ => panic!("Unexpected event"),
7670                 }
7671         } else { assert!(false) }
7672
7673         // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in Channel::new_from_req()
7674         nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
7675         let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
7676         open_channel.to_self_delay = 200;
7677         if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), InitFeatures::known(), &open_channel, 0, &low_our_to_self_config) {
7678                 match error {
7679                         ChannelError::Close(err) => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str()));  },
7680                         _ => panic!("Unexpected event"),
7681                 }
7682         } else { assert!(false); }
7683
7684         // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::accept_channel()
7685         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
7686         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
7687         let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
7688         accept_channel.to_self_delay = 200;
7689         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
7690         if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[0] {
7691                 match action {
7692                         &ErrorAction::SendErrorMessage { ref msg } => {
7693                                 assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(msg.data.as_str()));
7694                         },
7695                         _ => { assert!(false); }
7696                 }
7697         } else { assert!(false); }
7698
7699         // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::new_from_req()
7700         nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
7701         let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
7702         open_channel.to_self_delay = 200;
7703         if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), InitFeatures::known(), &open_channel, 0, &high_their_to_self_config) {
7704                 match error {
7705                         ChannelError::Close(err) => { assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(err.as_str())); },
7706                         _ => panic!("Unexpected event"),
7707                 }
7708         } else { assert!(false); }
7709 }
7710
7711 #[test]
7712 fn test_data_loss_protect() {
7713         // We want to be sure that:
7714         // * we don't broadcast our Local Commitment Tx if we have fallen behind
7715         //   (but this is not quite true - we broadcast during Drop because chanmon is out of sync with chanmgr)
7716         // * we close the channel if we detect that the other side has fallen behind
7717         // * we are able to claim our own outputs thanks to to_remote being static
7718         // TODO: this test is incomplete and the data_loss_protect implementation is incomplete - see issue #775
7719         let persister;
7720         let logger;
7721         let fee_estimator;
7722         let tx_broadcaster;
7723         let chain_source;
7724         let mut chanmon_cfgs = create_chanmon_cfgs(2);
7725         // We broadcast during Drop because chanmon is out of sync with chanmgr, which would cause a panic
7726         // during signing due to revoked tx
7727         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
7728         let keys_manager = &chanmon_cfgs[0].keys_manager;
7729         let monitor;
7730         let node_state_0;
7731         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7732         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7733         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7734
7735         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
7736
7737         // Cache node A state before any channel update
7738         let previous_node_state = nodes[0].node.encode();
7739         let mut previous_chain_monitor_state = test_utils::TestVecWriter(Vec::new());
7740         nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut previous_chain_monitor_state).unwrap();
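             // Both the ChannelManager and the ChannelMonitor are serialized here; restoring node A from
             // these stale snapshots below simulates a node that lost the state for the two payments sent next.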
7741
7742         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
7743         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
7744
7745         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
7746         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
7747
7748         // Restore node A from previous state
7749         logger = test_utils::TestLogger::with_id(format!("node {}", 0));
7750         let mut chain_monitor = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut io::Cursor::new(previous_chain_monitor_state.0), keys_manager).unwrap().1;
7751         chain_source = test_utils::TestChainSource::new(Network::Testnet);
7752         tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))};
7753         fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
7754         persister = test_utils::TestPersister::new();
7755         monitor = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &fee_estimator, &persister, keys_manager);
7756         node_state_0 = {
7757                 let mut channel_monitors = HashMap::new();
7758                 channel_monitors.insert(OutPoint { txid: chan.3.txid(), index: 0 }, &mut chain_monitor);
7759                 <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut io::Cursor::new(previous_node_state), ChannelManagerReadArgs {
7760                         keys_manager: keys_manager,
7761                         fee_estimator: &fee_estimator,
7762                         chain_monitor: &monitor,
7763                         logger: &logger,
7764                         tx_broadcaster: &tx_broadcaster,
7765                         default_config: UserConfig::default(),
7766                         channel_monitors,
7767                 }).unwrap().1
7768         };
7769         nodes[0].node = &node_state_0;
7770         assert!(monitor.watch_channel(OutPoint { txid: chan.3.txid(), index: 0 }, chain_monitor).is_ok());
7771         nodes[0].chain_monitor = &monitor;
7772         nodes[0].chain_source = &chain_source;
7773
7774         check_added_monitors!(nodes[0], 1);
7775
7776         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
7777         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
7778
7779         let reestablish_0 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
7780
7781         // Check we don't broadcast any transactions following learning of per_commitment_point from B
7782         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_0[0]);
7783         check_added_monitors!(nodes[0], 1);
7784
7785         {
7786                 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
7787                 assert_eq!(node_txn.len(), 0);
7788         }
7789
7790         let mut reestablish_1 = Vec::with_capacity(1);
7791         for msg in nodes[0].node.get_and_clear_pending_msg_events() {
7792                 if let MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } = msg {
7793                         assert_eq!(*node_id, nodes[1].node.get_our_node_id());
7794                         reestablish_1.push(msg.clone());
7795                 } else if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg {
7796                 } else if let MessageSendEvent::HandleError { ref action, .. } = msg {
7797                         match action {
7798                                 &ErrorAction::SendErrorMessage { ref msg } => {
7799                                         assert_eq!(msg.data, "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting");
7800                                 },
7801                                 _ => panic!("Unexpected event!"),
7802                         }
7803                 } else {
7804                         panic!("Unexpected event")
7805                 }
7806         }
7807
7808         // Check that B closes the channel upon detecting that A has fallen behind
7809         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
7810         assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Peer attempted to reestablish channel with a very old local commitment transaction");
7811         check_added_monitors!(nodes[1], 1);
7812
7813
7814         // Check that A is able to claim its to_remote output from B's commitment transaction
7815         let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
7816         assert_eq!(node_txn.len(), 1);
7817         check_spends!(node_txn[0], chan.3);
7818         assert_eq!(node_txn[0].output.len(), 2);
7819         mine_transaction(&nodes[0], &node_txn[0]);
7820         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
7821         let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
7822         assert_eq!(spend_txn.len(), 1);
7823         check_spends!(spend_txn[0], node_txn[0]);
7824 }
7825
7826 #[test]
7827 fn test_check_htlc_underpaying() {
7828         // Send a payment through A -> B, but A maliciously
7829         // sends a probe payment (i.e. less than the expected value)
7830         // to B; B should refuse the payment.
7831
7832         let chanmon_cfgs = create_chanmon_cfgs(2);
7833         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7834         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7835         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7836
7837         // Create some initial channels
7838         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
7839
7840         let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 10_000, TEST_FINAL_CLTV, nodes[0].logger).unwrap();
7841         let (_, our_payment_hash, _) = get_payment_preimage_hash!(nodes[0]);
7842         let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200, 0).unwrap();
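        // The recipient registered the hash expecting 100_000 msat, while the route above only pays 10_000 msat.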
7843         nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
7844         check_added_monitors!(nodes[0], 1);
7845
7846         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
7847         assert_eq!(events.len(), 1);
7848         let mut payment_event = SendEvent::from_event(events.pop().unwrap());
7849         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
7850         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
7851
7852         // Note that we first have to wait a random delay before processing the receipt of the HTLC,
7853         // and then will wait a second random delay before failing the HTLC back:
7854         expect_pending_htlcs_forwardable!(nodes[1]);
7855         expect_pending_htlcs_forwardable!(nodes[1]);
7856
7857         // nodes[1] is expecting a payment of 100_000 but received 10_000,
7858         // so it should fail the HTLC as if we didn't know the preimage.
7859         nodes[1].node.process_pending_htlc_forwards();
7860
7861         let events = nodes[1].node.get_and_clear_pending_msg_events();
7862         assert_eq!(events.len(), 1);
7863         let (update_fail_htlc, commitment_signed) = match events[0] {
7864                 MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
7865                         assert!(update_add_htlcs.is_empty());
7866                         assert!(update_fulfill_htlcs.is_empty());
7867                         assert_eq!(update_fail_htlcs.len(), 1);
7868                         assert!(update_fail_malformed_htlcs.is_empty());
7869                         assert!(update_fee.is_none());
7870                         (update_fail_htlcs[0].clone(), commitment_signed)
7871                 },
7872                 _ => panic!("Unexpected event"),
7873         };
7874         check_added_monitors!(nodes[1], 1);
7875
7876         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlc);
7877         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
7878
7879         // 10_000 msat as u64, followed by a height of CHAN_CONFIRM_DEPTH as u32
7880         let mut expected_failure_data = byte_utils::be64_to_array(10_000).to_vec();
7881         expected_failure_data.extend_from_slice(&byte_utils::be32_to_array(CHAN_CONFIRM_DEPTH));
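        // 0x4000|15 is PERM|incorrect_or_unknown_payment_details; its failure data carries the incoming HTLC amount and the current block height.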
7882         expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000|15, &expected_failure_data[..]);
7883 }
7884
7885 #[test]
7886 fn test_announce_disable_channels() {
7887         // Create 3 channels between A and B. Disconnect B, call timer_tick_occurred, and check that disabling
7888         // ChannelUpdates are generated. Reconnect B, reestablish, and check that re-enabling ChannelUpdates follow.
7889
7890         let chanmon_cfgs = create_chanmon_cfgs(2);
7891         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7892         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7893         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7894
7895         let short_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
7896         let short_id_2 = create_announced_chan_between_nodes(&nodes, 1, 0, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
7897         let short_id_3 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
7898
7899         // Disconnect peers
7900         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
7901         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
7902
7903         nodes[0].node.timer_tick_occurred(); // Enabled -> DisabledStaged
7904         nodes[0].node.timer_tick_occurred(); // DisabledStaged -> Disabled
7905         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
7906         assert_eq!(msg_events.len(), 3);
7907         let mut chans_disabled: HashSet<u64> = [short_id_1, short_id_2, short_id_3].iter().map(|a| *a).collect();
7908         for e in msg_events {
7909                 match e {
7910                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
7911                                 assert_eq!(msg.contents.flags & (1<<1), 1<<1); // The "channel disabled" bit should be set
7912                                 // Check that each channel gets updated exactly once
7913                                 if !chans_disabled.remove(&msg.contents.short_channel_id) {
7914                                         panic!("Generated ChannelUpdate for wrong chan!");
7915                                 }
7916                         },
7917                         _ => panic!("Unexpected event"),
7918                 }
7919         }
7920         // Reconnect peers
7921         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
7922         let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
7923         assert_eq!(reestablish_1.len(), 3);
7924         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
7925         let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
7926         assert_eq!(reestablish_2.len(), 3);
7927
7928         // Reestablish chan_1
7929         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
7930         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7931         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
7932         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7933         // Reestablish chan_2
7934         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[1]);
7935         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7936         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[1]);
7937         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7938         // Reestablish chan_3
7939         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[2]);
7940         handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
7941         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[2]);
7942         handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
7943
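        // With all three channels reestablished, the first tick produces no updates while the second broadcasts ChannelUpdates clearing the disabled bit.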
7944         nodes[0].node.timer_tick_occurred();
7945         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
7946         nodes[0].node.timer_tick_occurred();
7947         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
7948         assert_eq!(msg_events.len(), 3);
7949         chans_disabled = [short_id_1, short_id_2, short_id_3].iter().map(|a| *a).collect();
7950         for e in msg_events {
7951                 match e {
7952                         MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
7953                                 assert_eq!(msg.contents.flags & (1<<1), 0); // The "channel disabled" bit should be off
7954                                 // Check that each channel gets updated exactly once
7955                                 if !chans_disabled.remove(&msg.contents.short_channel_id) {
7956                                         panic!("Generated ChannelUpdate for wrong chan!");
7957                                 }
7958                         },
7959                         _ => panic!("Unexpected event"),
7960                 }
7961         }
7962 }
7963
7964 #[test]
7965 fn test_priv_forwarding_rejection() {
7966         // If we have a private channel with outbound liquidity, and
7967         // UserConfig::accept_forwards_to_priv_channels is set to false, we should reject any attempts
7968         // to forward through that channel.
7969         let chanmon_cfgs = create_chanmon_cfgs(3);
7970         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
7971         let mut no_announce_cfg = test_default_channel_config();
7972         no_announce_cfg.channel_options.announced_channel = false;
7973         no_announce_cfg.accept_forwards_to_priv_channels = false;
7974         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(no_announce_cfg), None]);
7975         let persister: test_utils::TestPersister;
7976         let new_chain_monitor: test_utils::TestChainMonitor;
7977         let nodes_1_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
7978         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
7979
7980         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000, InitFeatures::known(), InitFeatures::known());
7981
7982         // Note that the create_*_chan functions in utils require announcement_signatures, which we do
7983         // not send for private channels.
7984         nodes[1].node.create_channel(nodes[2].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
7985         let open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[2].node.get_our_node_id());
7986         nodes[2].node.handle_open_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &open_channel);
7987         let accept_channel = get_event_msg!(nodes[2], MessageSendEvent::SendAcceptChannel, nodes[1].node.get_our_node_id());
7988         nodes[1].node.handle_accept_channel(&nodes[2].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
7989
7990         let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[1], 1_000_000, 42);
7991         nodes[1].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
7992         nodes[2].node.handle_funding_created(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingCreated, nodes[2].node.get_our_node_id()));
7993         check_added_monitors!(nodes[2], 1);
7994
7995         nodes[1].node.handle_funding_signed(&nodes[2].node.get_our_node_id(), &get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, nodes[1].node.get_our_node_id()));
7996         check_added_monitors!(nodes[1], 1);
7997
7998         let conf_height = core::cmp::max(nodes[1].best_block_info().1 + 1, nodes[2].best_block_info().1 + 1);
7999         confirm_transaction_at(&nodes[1], &tx, conf_height);
8000         connect_blocks(&nodes[1], CHAN_CONFIRM_DEPTH - 1);
8001         confirm_transaction_at(&nodes[2], &tx, conf_height);
8002         connect_blocks(&nodes[2], CHAN_CONFIRM_DEPTH - 1);
8003         let as_funding_locked = get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[2].node.get_our_node_id());
8004         nodes[1].node.handle_funding_locked(&nodes[2].node.get_our_node_id(), &get_event_msg!(nodes[2], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id()));
8005         get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[2].node.get_our_node_id());
8006         nodes[2].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &as_funding_locked);
8007         get_event_msg!(nodes[2], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
8008
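        // The channel between nodes[1] and nodes[2] is private (never announced), so nodes[0] can only route to nodes[2] via the route hint built below.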
8009         assert!(nodes[0].node.list_usable_channels()[0].is_public);
8010         assert_eq!(nodes[1].node.list_usable_channels().len(), 2);
8011         assert!(!nodes[2].node.list_usable_channels()[0].is_public);
8012
8013         // We should always be able to forward through nodes[1] as long as it's out through a public
8014         // channel:
8015         send_payment(&nodes[2], &[&nodes[1], &nodes[0]], 10_000);
8016
8017         // ... however, if we send to nodes[2], we will have to pass the private channel from nodes[1]
8018         // to nodes[2], which should be rejected:
8019         let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]);
8020         let route = get_route(&nodes[0].node.get_our_node_id(),
8021                 &nodes[0].net_graph_msg_handler.network_graph.read().unwrap(),
8022                 &nodes[2].node.get_our_node_id(), Some(InvoiceFeatures::known()), None,
8023                 &[&RouteHint(vec![RouteHintHop {
8024                         src_node_id: nodes[1].node.get_our_node_id(),
8025                         short_channel_id: nodes[2].node.list_channels()[0].short_channel_id.unwrap(),
8026                         fees: RoutingFees { base_msat: 1000, proportional_millionths: 0 },
8027                         cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA,
8028                         htlc_minimum_msat: None,
8029                         htlc_maximum_msat: None,
8030                 }])], 10_000, TEST_FINAL_CLTV, nodes[0].logger).unwrap();
8031
8032         nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
8033         check_added_monitors!(nodes[0], 1);
8034         let payment_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
8035         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
8036         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true);
8037
8038         let htlc_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
8039         assert!(htlc_fail_updates.update_add_htlcs.is_empty());
8040         assert_eq!(htlc_fail_updates.update_fail_htlcs.len(), 1);
8041         assert!(htlc_fail_updates.update_fail_malformed_htlcs.is_empty());
8042         assert!(htlc_fail_updates.update_fee.is_none());
8043
8044         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_fail_updates.update_fail_htlcs[0]);
8045         commitment_signed_dance!(nodes[0], nodes[1], htlc_fail_updates.commitment_signed, true, true);
8046         expect_payment_failed!(nodes[0], our_payment_hash, false);
8047         expect_payment_failure_chan_update!(nodes[0], nodes[2].node.list_channels()[0].short_channel_id.unwrap(), true);
8048
8049         // Now disconnect nodes[1] from its peers and restart with accept_forwards_to_priv_channels set
8050         // to true. Sadly there is currently no way to change it at runtime.
8051
8052         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
8053         nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
8054
8055         let nodes_1_serialized = nodes[1].node.encode();
8056         let mut monitor_a_serialized = test_utils::TestVecWriter(Vec::new());
8057         let mut monitor_b_serialized = test_utils::TestVecWriter(Vec::new());
8058         {
8059                 let mons = nodes[1].chain_monitor.chain_monitor.monitors.read().unwrap();
8060                 let mut mon_iter = mons.iter();
8061                 mon_iter.next().unwrap().1.write(&mut monitor_a_serialized).unwrap();
8062                 mon_iter.next().unwrap().1.write(&mut monitor_b_serialized).unwrap();
8063         }
8064
8065         persister = test_utils::TestPersister::new();
8066         let keys_manager = &chanmon_cfgs[1].keys_manager;
8067         new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[1].chain_source), nodes[1].tx_broadcaster.clone(), nodes[1].logger, node_cfgs[1].fee_estimator, &persister, keys_manager);
8068         nodes[1].chain_monitor = &new_chain_monitor;
8069
8070         let mut monitor_a_read = &monitor_a_serialized.0[..];
8071         let mut monitor_b_read = &monitor_b_serialized.0[..];
8072         let (_, mut monitor_a) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut monitor_a_read, keys_manager).unwrap();
8073         let (_, mut monitor_b) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut monitor_b_read, keys_manager).unwrap();
8074         assert!(monitor_a_read.is_empty());
8075         assert!(monitor_b_read.is_empty());
8076
8077         no_announce_cfg.accept_forwards_to_priv_channels = true;
8078
8079         let mut nodes_1_read = &nodes_1_serialized[..];
8080         let (_, nodes_1_deserialized_tmp) = {
8081                 let mut channel_monitors = HashMap::new();
8082                 channel_monitors.insert(monitor_a.get_funding_txo().0, &mut monitor_a);
8083                 channel_monitors.insert(monitor_b.get_funding_txo().0, &mut monitor_b);
8084                 <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_1_read, ChannelManagerReadArgs {
8085                         default_config: no_announce_cfg,
8086                         keys_manager,
8087                         fee_estimator: node_cfgs[1].fee_estimator,
8088                         chain_monitor: nodes[1].chain_monitor,
8089                         tx_broadcaster: nodes[1].tx_broadcaster.clone(),
8090                         logger: nodes[1].logger,
8091                         channel_monitors,
8092                 }).unwrap()
8093         };
8094         assert!(nodes_1_read.is_empty());
8095         nodes_1_deserialized = nodes_1_deserialized_tmp;
8096
8097         assert!(nodes[1].chain_monitor.watch_channel(monitor_a.get_funding_txo().0, monitor_a).is_ok());
8098         assert!(nodes[1].chain_monitor.watch_channel(monitor_b.get_funding_txo().0, monitor_b).is_ok());
8099         check_added_monitors!(nodes[1], 2);
8100         nodes[1].node = &nodes_1_deserialized;
8101
8102         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
8103         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
8104         let as_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
8105         let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
8106         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
8107         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
8108         get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
8109         get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
8110
8111         nodes[1].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
8112         nodes[2].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
8113         let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[2].node.get_our_node_id());
8114         let cs_reestablish = get_event_msg!(nodes[2], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
8115         nodes[2].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
8116         nodes[1].node.handle_channel_reestablish(&nodes[2].node.get_our_node_id(), &cs_reestablish);
8117         get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[2].node.get_our_node_id());
8118         get_event_msg!(nodes[2], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
8119
8120         nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
8121         check_added_monitors!(nodes[0], 1);
8122         pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], 10_000, our_payment_hash, our_payment_secret);
8123         claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], our_payment_preimage);
8124 }
8125
8126 #[test]
8127 fn test_bump_penalty_txn_on_revoked_commitment() {
8128         // In case of penalty txn with feerates too low for getting into mempools, RBF-bump them to be sure
8129         // we're able to claim outputs on the revoked commitment transaction before timelocks expire
8130
8131         let chanmon_cfgs = create_chanmon_cfgs(2);
8132         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8133         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8134         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8135
8136         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, InitFeatures::known(), InitFeatures::known());
8137         let logger = test_utils::TestLogger::new();
8138
8139         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
8140         let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
8141         let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 3000000, 30, &logger).unwrap();
8142         send_along_route(&nodes[1], route, &vec!(&nodes[0])[..], 3000000);
8143
8144         let revoked_txn = get_local_commitment_txn!(nodes[0], chan.2);
8145         // Revoked commitment txn with 4 outputs : to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
8146         assert_eq!(revoked_txn[0].output.len(), 4);
8147         assert_eq!(revoked_txn[0].input.len(), 1);
8148         assert_eq!(revoked_txn[0].input[0].previous_output.txid, chan.3.txid());
8149         let revoked_txid = revoked_txn[0].txid();
8150
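        // Sum the P2WSH outputs (to_local plus both HTLC outputs) of the revoked commitment - the justice txn below claim exactly these, so we can derive their fees.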
8151         let mut penalty_sum = 0;
8152         for outp in revoked_txn[0].output.iter() {
8153                 if outp.script_pubkey.is_v0_p2wsh() {
8154                         penalty_sum += outp.value;
8155                 }
8156         }
8157
8158         // Connect blocks to change the height_timer range to see if we use the right soonest_timelock
8159         let header_114 = connect_blocks(&nodes[1], 14);
8160
8161         // Actually revoke tx by claiming a HTLC
8162         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
8163         let header = BlockHeader { version: 0x20000000, prev_blockhash: header_114, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
8164         connect_block(&nodes[1], &Block { header, txdata: vec![revoked_txn[0].clone()] });
8165         check_added_monitors!(nodes[1], 1);
8166
8167         // One or more justice tx should have been broadcast, check it
8168         let penalty_1;
8169         let feerate_1;
8170         {
8171                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
8172                 assert_eq!(node_txn.len(), 2); // justice tx (broadcasted from ChannelMonitor) + local commitment tx
8173                 assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
8174                 assert_eq!(node_txn[0].output.len(), 1);
8175                 check_spends!(node_txn[0], revoked_txn[0]);
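                // Feerates below are computed in sats per kilo-weight-unit (fee * 1000 / weight), matching the estimator's sat_per_kw.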
8176                 let fee_1 = penalty_sum - node_txn[0].output[0].value;
8177                 feerate_1 = fee_1 * 1000 / node_txn[0].get_weight() as u64;
8178                 penalty_1 = node_txn[0].txid();
8179                 node_txn.clear();
8180         };
8181
8182         // After exhaustion of height timer, a new bumped justice tx should have been broadcast, check it
8183         connect_blocks(&nodes[1], 15);
8184         let mut penalty_2 = penalty_1;
8185         let mut feerate_2 = 0;
8186         {
8187                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
8188                 assert_eq!(node_txn.len(), 1);
8189                 if node_txn[0].input[0].previous_output.txid == revoked_txid {
8190                         assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
8191                         assert_eq!(node_txn[0].output.len(), 1);
8192                         check_spends!(node_txn[0], revoked_txn[0]);
8193                         penalty_2 = node_txn[0].txid();
8194                         // Verify the new bumped tx is different from the last claiming transaction - we don't want spurious rebroadcasts
8195                         assert_ne!(penalty_2, penalty_1);
8196                         let fee_2 = penalty_sum - node_txn[0].output[0].value;
8197                         feerate_2 = fee_2 * 1000 / node_txn[0].get_weight() as u64;
8198                         // Verify 25% bump heuristic
8199                         assert!(feerate_2 * 100 >= feerate_1 * 125);
8200                         node_txn.clear();
8201                 }
8202         }
8203         assert_ne!(feerate_2, 0);
8204
8205         // After exhaustion of height timer for a 2nd time, a new bumped justice tx should have been broadcast, check it
8206         connect_blocks(&nodes[1], 1);
8207         let penalty_3;
8208         let mut feerate_3 = 0;
8209         {
8210                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
8211                 assert_eq!(node_txn.len(), 1);
8212                 if node_txn[0].input[0].previous_output.txid == revoked_txid {
8213                         assert_eq!(node_txn[0].input.len(), 3); // Penalty txn claims to_local, offered_htlc and received_htlc outputs
8214                         assert_eq!(node_txn[0].output.len(), 1);
8215                         check_spends!(node_txn[0], revoked_txn[0]);
8216                         penalty_3 = node_txn[0].txid();
8217                         // Verify the new bumped tx is different from the last claiming transaction - we don't want spurious rebroadcasts
8218                         assert_ne!(penalty_3, penalty_2);
8219                         let fee_3 = penalty_sum - node_txn[0].output[0].value;
8220                         feerate_3 = fee_3 * 1000 / node_txn[0].get_weight() as u64;
8221                         // Verify 25% bump heuristic
8222                         assert!(feerate_3 * 100 >= feerate_2 * 125);
8223                         node_txn.clear();
8224                 }
8225         }
8226         assert_ne!(feerate_3, 0);
8227
8228         nodes[1].node.get_and_clear_pending_events();
8229         nodes[1].node.get_and_clear_pending_msg_events();
8230 }
8231
8232 #[test]
8233 fn test_bump_penalty_txn_on_revoked_htlcs() {
8234         // In case of penalty txn with feerates too low for getting into mempools, RBF-bump them to be sure
8235         // we're able to claim outputs on revoked HTLC transactions before timelocks expire
8236
8237         let mut chanmon_cfgs = create_chanmon_cfgs(2);
8238         chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
8239         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8240         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8241         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8242
8243         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, InitFeatures::known(), InitFeatures::known());
8244         // Lock HTLC in both directions (using a slightly lower CLTV delay to provide timely RBF bumps)
8245         let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph.read().unwrap(),
8246                 &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 3_000_000, 50, nodes[0].logger).unwrap();
8247         let payment_preimage = send_along_route(&nodes[0], route, &[&nodes[1]], 3_000_000).0;
8248         let route = get_route(&nodes[1].node.get_our_node_id(), &nodes[1].net_graph_msg_handler.network_graph.read().unwrap(),
8249                 &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 3_000_000, 50, nodes[0].logger).unwrap();
8250         send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000);
8251
8252         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
8253         assert_eq!(revoked_local_txn[0].input.len(), 1);
8254         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
8255
8256         // Revoke local commitment tx
8257         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
8258
8259         let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
8260         // B will generate both revoked HTLC-timeout/HTLC-preimage txn from revoked commitment tx
8261         connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone()] });
8262         check_closed_broadcast!(nodes[1], true);
8263         check_added_monitors!(nodes[1], 1);
8264         connect_blocks(&nodes[1], 49); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above)
8265
8266         let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
8267         assert_eq!(revoked_htlc_txn.len(), 3);
8268         check_spends!(revoked_htlc_txn[1], chan.3);
8269
8270         assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
8271         assert_eq!(revoked_htlc_txn[0].input.len(), 1);
8272         check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]);
8273
8274         assert_eq!(revoked_htlc_txn[2].input.len(), 1);
8275         assert_eq!(revoked_htlc_txn[2].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
8276         assert_eq!(revoked_htlc_txn[2].output.len(), 1);
8277         check_spends!(revoked_htlc_txn[2], revoked_local_txn[0]);
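        // revoked_htlc_txn[0] and [2] spend the two HTLC outputs of the revoked commitment above; [1] spends the funding output (B's own commitment tx, broadcast on force-close).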
8278
8279         // Broadcast set of revoked txn on A
8280         let hash_128 = connect_blocks(&nodes[0], 40);
8281         let header_11 = BlockHeader { version: 0x20000000, prev_blockhash: hash_128, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
8282         connect_block(&nodes[0], &Block { header: header_11, txdata: vec![revoked_local_txn[0].clone()] });
8283         let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_11.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
8284         connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[2].clone()] });
8285         expect_pending_htlcs_forwardable_ignore!(nodes[0]);
8286         let first;
8287         let feerate_1;
8288         let penalty_txn;
8289         {
8290                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
8291                 assert_eq!(node_txn.len(), 5); // 3 penalty txn on revoked commitment tx + A commitment tx + 1 penalty txn on revoked HTLC txn
8292                 // Verify claim tx are spending revoked HTLC txn
8293
8294                 // node_txn 0-2 each spend a separate revoked output from revoked_local_txn[0]
8295                 // Note that node_txn[0] and node_txn[1] are bogus - they double spend the revoked_htlc_txn
8296                 // which are included in the same block (they are broadcasted because we scan the
8297                 // transactions linearly and generate claims as we go, they likely should be removed in the
8298                 // future).
8299                 assert_eq!(node_txn[0].input.len(), 1);
8300                 check_spends!(node_txn[0], revoked_local_txn[0]);
8301                 assert_eq!(node_txn[1].input.len(), 1);
8302                 check_spends!(node_txn[1], revoked_local_txn[0]);
8303                 assert_eq!(node_txn[2].input.len(), 1);
8304                 check_spends!(node_txn[2], revoked_local_txn[0]);
8305
8306                 // Each of the three justice transactions claim a separate (single) output of the three
8307                 // available, which we check here:
8308                 assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
8309                 assert_ne!(node_txn[0].input[0].previous_output, node_txn[2].input[0].previous_output);
8310                 assert_ne!(node_txn[1].input[0].previous_output, node_txn[2].input[0].previous_output);
8311
8312                 assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
8313                 assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[2].input[0].previous_output);
8314
8315                 // node_txn[3] is the local commitment tx broadcast just because (and somewhat in case of
8316                 // reorgs, though it's not clear it's ever worth broadcasting conflicting txn like this when
8317                 // a remote commitment tx has already been confirmed).
8318                 check_spends!(node_txn[3], chan.3);
8319
8320                 // node_txn[4] spends the revoked outputs from the revoked_htlc_txn (which only have one
8321                 // output, checked above).
8322                 assert_eq!(node_txn[4].input.len(), 2);
8323                 assert_eq!(node_txn[4].output.len(), 1);
8324                 check_spends!(node_txn[4], revoked_htlc_txn[0], revoked_htlc_txn[2]);
8325
8326                 first = node_txn[4].txid();
8327                 // Store both feerates for later comparison
8328                 let fee_1 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[2].output[0].value - node_txn[4].output[0].value;
8329                 feerate_1 = fee_1 * 1000 / node_txn[4].get_weight() as u64;
8330                 penalty_txn = vec![node_txn[2].clone()];
8331                 node_txn.clear();
8332         }
8333
8334         // Connect one more block to see if bumped penalty are issued for HTLC txn
8335         let header_130 = BlockHeader { version: 0x20000000, prev_blockhash: header_129.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
8336         connect_block(&nodes[0], &Block { header: header_130, txdata: penalty_txn });
8337         let header_131 = BlockHeader { version: 0x20000000, prev_blockhash: header_130.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
8338         connect_block(&nodes[0], &Block { header: header_131, txdata: Vec::new() });
8339         {
8340                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
8341                 assert_eq!(node_txn.len(), 2); // 2 bumped penalty txn on revoked commitment tx
8342
8343                 check_spends!(node_txn[0], revoked_local_txn[0]);
8344                 check_spends!(node_txn[1], revoked_local_txn[0]);
8345                 // Note that these are both bogus - they spend outputs already claimed in block 129:
8346                 if node_txn[0].input[0].previous_output == revoked_htlc_txn[0].input[0].previous_output  {
8347                         assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[2].input[0].previous_output);
8348                 } else {
8349                         assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[2].input[0].previous_output);
8350                         assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
8351                 }
8352
8353                 node_txn.clear();
8354         };
8355
8356         // A few more blocks to confirm the penalty txn
8357         connect_blocks(&nodes[0], 4);
8358         assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
8359         let header_144 = connect_blocks(&nodes[0], 9);
8360         let node_txn = {
8361                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
8362                 assert_eq!(node_txn.len(), 1);
8363
8364                 assert_eq!(node_txn[0].input.len(), 2);
8365                 check_spends!(node_txn[0], revoked_htlc_txn[0], revoked_htlc_txn[2]);
8366                 // Verify the bumped tx is different and satisfies the 25% bump heuristic
8367                 assert_ne!(first, node_txn[0].txid());
8368                 let fee_2 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[2].output[0].value - node_txn[0].output[0].value;
8369                 let feerate_2 = fee_2 * 1000 / node_txn[0].get_weight() as u64;
8370                 assert!(feerate_2 * 100 > feerate_1 * 125);
8371                 let txn = vec![node_txn[0].clone()];
8372                 node_txn.clear();
8373                 txn
8374         };
8375         // Broadcast claim txn and confirm blocks to avoid further bumps on these outputs
8376         let header_145 = BlockHeader { version: 0x20000000, prev_blockhash: header_144, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
8377         connect_block(&nodes[0], &Block { header: header_145, txdata: node_txn });
8378         connect_blocks(&nodes[0], 20);
8379         {
8380                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
8381                 // We verify that no new transaction has been broadcast because previously
8382                 // we were buggy on this exact behavior: we were not tracking remote HTLC outputs for monitoring (see #411),
8383                 // which meant we wouldn't see a spend of them by a justice tx, and bumped justice txn
8384                 // were generated forever instead of being safely cleaned up after confirmation and ANTI_REORG_DELAY blocks.
8385                 // Enforce that spending the revoked HTLC outputs via a claiming transaction removes the request as expected
8386                 // and dries up bumped justice generation.
8387                 assert_eq!(node_txn.len(), 0);
8388                 node_txn.clear();
8389         }
8390         check_closed_broadcast!(nodes[0], true);
8391         check_added_monitors!(nodes[0], 1);
8392 }
8393
8394 #[test]
8395 fn test_bump_penalty_txn_on_remote_commitment() {
8396         // In case of claim txn with feerates too low for getting into mempools, RBF-bump them to be sure
8397         // we're able to claim outputs on the remote commitment transaction before timelocks expire
8398
8399         // Create 2 HTLCs
8400         // Provide preimage for one
8401         // Check aggregation
8402
8403         let chanmon_cfgs = create_chanmon_cfgs(2);
8404         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8405         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8406         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8407
8408         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, InitFeatures::known(), InitFeatures::known());
8409         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
8410         route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0;
8411
8412         // Remote commitment txn with 4 outputs : to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
8413         let remote_txn = get_local_commitment_txn!(nodes[0], chan.2);
8414         assert_eq!(remote_txn[0].output.len(), 4);
8415         assert_eq!(remote_txn[0].input.len(), 1);
8416         assert_eq!(remote_txn[0].input[0].previous_output.txid, chan.3.txid());
8417
8418         // Claim a HTLC without revocation (provide B monitor with preimage)
8419         nodes[1].node.claim_funds(payment_preimage);
8420         mine_transaction(&nodes[1], &remote_txn[0]);
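        // Two monitor updates are expected here - one from claim_funds providing the preimage and one from force-closing upon seeing the commitment confirm.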
8421         check_added_monitors!(nodes[1], 2);
8422         connect_blocks(&nodes[1], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
8423
8424         // One or more claim tx should have been broadcast, check it
8425         let timeout;
8426         let preimage;
8427         let preimage_bump;
8428         let feerate_timeout;
8429         let feerate_preimage;
8430         {
8431                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
8432                 // 8 transactions including:
8433                 // 1*2 ChannelManager local broadcasts of commitment + HTLC-Success
8434                 // 1*3 ChannelManager local broadcasts of commitment + HTLC-Success + HTLC-Timeout
8435                 // 2 * HTLC-Success (one RBF bump we'll check later)
8436                 // 1 * HTLC-Timeout
8437                 assert_eq!(node_txn.len(), 8);
8438                 assert_eq!(node_txn[0].input.len(), 1);
8439                 assert_eq!(node_txn[6].input.len(), 1);
8440                 check_spends!(node_txn[0], remote_txn[0]);
8441                 check_spends!(node_txn[6], remote_txn[0]);
8442                 assert_eq!(node_txn[0].input[0].previous_output, node_txn[3].input[0].previous_output);
8443                 preimage_bump = node_txn[3].clone();
8444
8445                 check_spends!(node_txn[1], chan.3);
8446                 check_spends!(node_txn[2], node_txn[1]);
8447                 assert_eq!(node_txn[1], node_txn[4]);
8448                 assert_eq!(node_txn[2], node_txn[5]);
8449
8450                 timeout = node_txn[6].txid();
8451                 let index = node_txn[6].input[0].previous_output.vout;
8452                 let fee = remote_txn[0].output[index as usize].value - node_txn[6].output[0].value;
8453                 feerate_timeout = fee * 1000 / node_txn[6].get_weight() as u64;
8454
8455                 preimage = node_txn[0].txid();
8456                 let index = node_txn[0].input[0].previous_output.vout;
8457                 let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value;
8458                 feerate_preimage = fee * 1000 / node_txn[0].get_weight() as u64;
8459
8460                 node_txn.clear();
8461         };
8462         assert_ne!(feerate_timeout, 0);
8463         assert_ne!(feerate_preimage, 0);
8464
8465         // After exhaustion of height timer, new bumped claim txn should have been broadcast, check it
8466         connect_blocks(&nodes[1], 15);
8467         {
8468                 let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
8469                 assert_eq!(node_txn.len(), 1);
8470                 assert_eq!(node_txn[0].input.len(), 1);
8471                 assert_eq!(preimage_bump.input.len(), 1);
8472                 check_spends!(node_txn[0], remote_txn[0]);
8473                 check_spends!(preimage_bump, remote_txn[0]);
8474
8475                 let index = preimage_bump.input[0].previous_output.vout;
8476                 let fee = remote_txn[0].output[index as usize].value - preimage_bump.output[0].value;
8477                 let new_feerate = fee * 1000 / preimage_bump.get_weight() as u64;
8478                 assert!(new_feerate * 100 > feerate_timeout * 125);
8479                 assert_ne!(timeout, preimage_bump.txid());
8480
8481                 let index = node_txn[0].input[0].previous_output.vout;
8482                 let fee = remote_txn[0].output[index as usize].value - node_txn[0].output[0].value;
8483                 let new_feerate = fee * 1000 / node_txn[0].get_weight() as u64;
8484                 assert!(new_feerate * 100 > feerate_preimage * 125);
8485                 assert_ne!(preimage, node_txn[0].txid());
8486
8487                 node_txn.clear();
8488         }
8489
8490         nodes[1].node.get_and_clear_pending_events();
8491         nodes[1].node.get_and_clear_pending_msg_events();
8492 }
8493
8494 #[test]
8495 fn test_counterparty_raa_skip_no_crash() {
8496         // Previously, if our counterparty sent two RAAs in a row without us having provided a
8497         // commitment transaction, we would have happily carried on and provided them the next
8498         // commitment transaction based on one RAA forward. This would probably eventually have led to
8499         // channel closure, but it would not have resulted in funds loss. Still, our
8500         // EnforcingSigner would have panicked as it doesn't like jumps into the future. Here, we
8501         // check simply that the channel is closed in response to such an RAA, but don't check whether
8502         // we decide to punish our counterparty for revoking their funds (as we don't currently
8503         // implement that).
8504         let chanmon_cfgs = create_chanmon_cfgs(2);
8505         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8506         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8507         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8508         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
8509
8510         let mut guard = nodes[0].node.channel_state.lock().unwrap();
8511         let keys = &guard.by_id.get_mut(&channel_id).unwrap().get_signer();
8512         const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
8513         let per_commitment_secret = keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER);
8514         // Must revoke without gaps
8515         keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1);
8516         let next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(),
8517                 &SecretKey::from_slice(&keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap());
8518
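        // Hand B a revoke_and_ack it was not expecting (B never sent a commitment_signed that would elicit one); B should close the channel.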
8519         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(),
8520                 &msgs::RevokeAndACK { channel_id, per_commitment_secret, next_per_commitment_point });
8521         assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack");
8522         check_added_monitors!(nodes[1], 1);
8523 }
8524
8525 #[test]
8526 fn test_bump_txn_sanitize_tracking_maps() {
8527         // Sanitizing pending_claim_requests and claimable_outpoints used to be buggy;
8528         // verify we clean them right after ANTI_REORG_DELAY expires.
8529
8530         let chanmon_cfgs = create_chanmon_cfgs(2);
8531         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8532         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8533         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8534
8535         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, InitFeatures::known(), InitFeatures::known());
8536         // Lock HTLC in both directions
8537         let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
8538         route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000).0;
8539
8540         let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
8541         assert_eq!(revoked_local_txn[0].input.len(), 1);
8542         assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
8543
8544         // Revoke local commitment tx
8545         claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
8546
8547         // Broadcast set of revoked txn on A
8548         connect_blocks(&nodes[0], TEST_FINAL_CLTV + 2 - CHAN_CONFIRM_DEPTH);
8549         expect_pending_htlcs_forwardable_ignore!(nodes[0]);
8550         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
8551
8552         mine_transaction(&nodes[0], &revoked_local_txn[0]);
8553         check_closed_broadcast!(nodes[0], true);
8554         check_added_monitors!(nodes[0], 1);
8555         let penalty_txn = {
8556                 let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
8557                 assert_eq!(node_txn.len(), 4); //ChannelMonitor: justice txn * 3, ChannelManager: local commitment tx
8558                 check_spends!(node_txn[0], revoked_local_txn[0]);
8559                 check_spends!(node_txn[1], revoked_local_txn[0]);
8560                 check_spends!(node_txn[2], revoked_local_txn[0]);
8561                 let penalty_txn = vec![node_txn[0].clone(), node_txn[1].clone(), node_txn[2].clone()];
8562                 node_txn.clear();
8563                 penalty_txn
8564         };
8565         let header_130 = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
8566         connect_block(&nodes[0], &Block { header: header_130, txdata: penalty_txn });
8567         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
8568         {
8569                 let monitors = nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap();
8570                 if let Some(monitor) = monitors.get(&OutPoint { txid: chan.3.txid(), index: 0 }) {
8571                         assert!(monitor.inner.lock().unwrap().onchain_tx_handler.pending_claim_requests.is_empty());
8572                         assert!(monitor.inner.lock().unwrap().onchain_tx_handler.claimable_outpoints.is_empty());
8573                 }
8574         }
8575 }
8576
8577 #[test]
8578 fn test_override_channel_config() {
8579         let chanmon_cfgs = create_chanmon_cfgs(2);
8580         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8581         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8582         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8583
8584         // Node0 initiates a channel to node1 using the override config.
8585         let mut override_config = UserConfig::default();
8586         override_config.own_channel_config.our_to_self_delay = 200;
8587
8588         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, Some(override_config)).unwrap();
8589
8590         // Assert the channel created by node0 is using the override config.
8591         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8592         assert_eq!(res.channel_flags, 0);
8593         assert_eq!(res.to_self_delay, 200);
8594 }
8595
8596 #[test]
8597 fn test_override_0msat_htlc_minimum() {
8598         let mut zero_config = UserConfig::default();
8599         zero_config.own_channel_config.our_htlc_minimum_msat = 0;
8600         let chanmon_cfgs = create_chanmon_cfgs(2);
8601         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8602         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(zero_config.clone())]);
8603         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8604
8605         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, Some(zero_config)).unwrap();
8606         let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8607         assert_eq!(res.htlc_minimum_msat, 1);
8608
8609         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &res);
8610         let res = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
8611         assert_eq!(res.htlc_minimum_msat, 1);
8612 }
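
// A minimal sketch of the clamping behavior asserted above (an assumption about the rule, not
// LDK's actual code path): a configured our_htlc_minimum_msat of 0 is advertised as 1 in both
// open_channel and accept_channel, since a zero minimum is not allowed on the wire here.
#[allow(dead_code)]
fn advertised_htlc_minimum_msat(configured_msat: u64) -> u64 {
        core::cmp::max(configured_msat, 1)
}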
8613
8614 #[test]
8615 fn test_simple_mpp() {
8616         // Simple test of sending a multi-path payment.
8617         let chanmon_cfgs = create_chanmon_cfgs(4);
8618         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
8619         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
8620         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
8621
8622         let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
8623         let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
8624         let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
8625         let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
8626         let logger = test_utils::TestLogger::new();
8627
8628         let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[3]);
8629         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
8630         let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[3].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
8631         let path = route.paths[0].clone();
8632         route.paths.push(path);
8633         route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
8634         route.paths[0][0].short_channel_id = chan_1_id;
8635         route.paths[0][1].short_channel_id = chan_3_id;
8636         route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
8637         route.paths[1][0].short_channel_id = chan_2_id;
8638         route.paths[1][1].short_channel_id = chan_4_id;
8639         send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 200_000, payment_hash, payment_secret);
8640         claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
8641 }
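
// A minimal sketch of the invariant relied upon above when hand-building the two MPP paths
// (hypothetical helper, not part of the router): the per-path amounts must sum to the total
// being paid, here 100_000 msat on each of the two paths for a 200_000 msat payment.
#[allow(dead_code)]
fn mpp_amounts_consistent(path_amounts_msat: &[u64], total_msat: u64) -> bool {
        path_amounts_msat.iter().sum::<u64>() == total_msat
}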
8642
8643 #[test]
8644 fn test_preimage_storage() {
8645         // Simple test of payment preimage storage allowing no client-side storage to claim payments
8646         let chanmon_cfgs = create_chanmon_cfgs(2);
8647         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8648         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8649         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8650
8651         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
8652
8653         {
8654                 let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 7200, 42);
8655
8656                 let logger = test_utils::TestLogger::new();
8657                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
8658                 let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100_000, TEST_FINAL_CLTV, &logger).unwrap();
8659                 nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
8660                 check_added_monitors!(nodes[0], 1);
8661                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8662                 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
8663                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
8664                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
8665         }
8666         // Note that after leaving the above scope we have no knowledge of any arguments or return
8667         // values from previous calls.
8668         expect_pending_htlcs_forwardable!(nodes[1]);
8669         let events = nodes[1].node.get_and_clear_pending_events();
8670         assert_eq!(events.len(), 1);
8671         match events[0] {
8672                 Event::PaymentReceived { ref purpose, .. } => {
8673                         match &purpose {
8674                                 PaymentPurpose::InvoicePayment { payment_preimage, user_payment_id, .. } => {
8675                                         assert_eq!(*user_payment_id, 42);
8676                                         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage.unwrap());
8677                                 },
8678                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
8679                         }
8680                 },
8681                 _ => panic!("Unexpected event"),
8682         }
8683 }
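
// A minimal sketch (hypothetical helper, not an LDK API) of the claim flow exercised above: pull
// the payment_preimage that was stored for us out of the PaymentReceived event's purpose so it
// can be handed to claim_payment/claim_funds without any client-side preimage storage.
#[allow(dead_code)]
fn preimage_from_event(event: &Event) -> Option<PaymentPreimage> {
        match event {
                Event::PaymentReceived { purpose: PaymentPurpose::InvoicePayment { payment_preimage, .. }, .. } =>
                        *payment_preimage,
                _ => None,
        }
}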
8684
8685 #[test]
8686 fn test_secret_timeout() {
8687         // Simple test of payment secret storage time outs
8688         let chanmon_cfgs = create_chanmon_cfgs(2);
8689         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8690         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8691         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8692
8693         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
8694
8695         let (payment_hash, payment_secret_1) = nodes[1].node.create_inbound_payment(Some(100_000), 2, 0);
8696
8697         // We should fail to register the same payment hash twice, at least until we've connected a
8698         // block with time 7200 + CHAN_CONFIRM_DEPTH + 1.
8699         if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash(payment_hash, Some(100_000), 2, 0) {
8700                 assert_eq!(err, "Duplicate payment hash");
8701         } else { panic!(); }
8702         let mut block = {
8703                 let node_1_blocks = nodes[1].blocks.lock().unwrap();
8704                 Block {
8705                         header: BlockHeader {
8706                                 version: 0x20000000,
8707                                 prev_blockhash: node_1_blocks.last().unwrap().0.block_hash(),
8708                                 merkle_root: Default::default(),
8709                                 time: node_1_blocks.len() as u32 + 7200, bits: 42, nonce: 42 },
8710                         txdata: vec![],
8711                 }
8712         };
8713         connect_block(&nodes[1], &block);
8714         if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash(payment_hash, Some(100_000), 2, 0) {
8715                 assert_eq!(err, "Duplicate payment hash");
8716         } else { panic!(); }
8717
8718         // If we then connect the second block, we should be able to register the same payment hash
8719         // again with a different user_payment_id (this time getting a new payment secret).
8720         block.header.prev_blockhash = block.header.block_hash();
8721         block.header.time += 1;
8722         connect_block(&nodes[1], &block);
8723         let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(payment_hash, Some(100_000), 2, 42).unwrap();
8724         assert_ne!(payment_secret_1, our_payment_secret);
8725
8726         {
8727                 let logger = test_utils::TestLogger::new();
8728                 let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
8729                 let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100_000, TEST_FINAL_CLTV, &logger).unwrap();
8730                 nodes[0].node.send_payment(&route, payment_hash, &Some(our_payment_secret)).unwrap();
8731                 check_added_monitors!(nodes[0], 1);
8732                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8733                 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
8734                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
8735                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
8736         }
8737         // Note that after leaving the above scope we have no knowledge of any arguments or return
8738         // values from previous calls.
8739         expect_pending_htlcs_forwardable!(nodes[1]);
8740         let events = nodes[1].node.get_and_clear_pending_events();
8741         assert_eq!(events.len(), 1);
8742         match events[0] {
8743                 Event::PaymentReceived { purpose: PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, user_payment_id }, .. } => {
8744                         assert!(payment_preimage.is_none());
8745                         assert_eq!(user_payment_id, 42);
8746                         assert_eq!(payment_secret, our_payment_secret);
8747                         // We don't actually have the payment preimage with which to claim this payment!
8748                 },
8749                 _ => panic!("Unexpected event"),
8750         }
8751 }
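
// A minimal sketch of the staleness rule exercised above (an assumption with hypothetical names,
// not LDK's actual implementation): a registered payment hash only becomes re-registrable once
// the highest block timestamp we have seen moves past the payment's absolute expiry time, which
// is why the first block above is not enough and the second one is.
#[allow(dead_code)]
fn inbound_payment_expired(absolute_expiry_time: u64, highest_seen_block_time: u64) -> bool {
        highest_seen_block_time > absolute_expiry_time
}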
8752
8753 #[test]
8754 fn test_bad_secret_hash() {
8755         // Simple test of unregistered payment hash/invalid payment secret handling
8756         let chanmon_cfgs = create_chanmon_cfgs(2);
8757         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8758         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8759         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8760
8761         create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
8762
8763         let random_payment_hash = PaymentHash([42; 32]);
8764         let random_payment_secret = PaymentSecret([43; 32]);
8765         let (our_payment_hash, our_payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 2, 0);
8766
8767         let logger = test_utils::TestLogger::new();
8768         let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
8769         let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100_000, TEST_FINAL_CLTV, &logger).unwrap();
8770
8771         // All the cases below should end up being handled exactly identically, so we macro the
8772         // handling of the resulting events.
8773         macro_rules! handle_unknown_invalid_payment_data {
8774                 () => {
8775                         check_added_monitors!(nodes[0], 1);
8776                         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8777                         let payment_event = SendEvent::from_event(events.pop().unwrap());
8778                         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
8779                         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
8780
8781                         // We have to forward pending HTLCs once to process the receipt of the HTLC and then
8782                         // again to process the pending backwards-failure of the HTLC
8783                         expect_pending_htlcs_forwardable!(nodes[1]);
8784                         expect_pending_htlcs_forwardable!(nodes[1]);
8785                         check_added_monitors!(nodes[1], 1);
8786
8787                         // We should fail the payment back
8788                         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
8789                         match events.pop().unwrap() {
8790                                 MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. } } => {
8791                                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
8792                                         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
8793                                 },
8794                                 _ => panic!("Unexpected event"),
8795                         }
8796                 }
8797         }
8798
8799         let expected_error_code = 0x4000|15; // incorrect_or_unknown_payment_details
8800         // Error data is the HTLC value (100,000) and current block height
8801         let expected_error_data = [0, 0, 0, 0, 0, 1, 0x86, 0xa0, 0, 0, 0, CHAN_CONFIRM_DEPTH as u8];
8802
8803         // Send a payment with the right payment hash but the wrong payment secret
8804         nodes[0].node.send_payment(&route, our_payment_hash, &Some(random_payment_secret)).unwrap();
8805         handle_unknown_invalid_payment_data!();
8806         expect_payment_failed!(nodes[0], our_payment_hash, true, expected_error_code, expected_error_data);
8807
8808         // Send a payment with a random payment hash, but the right payment secret
8809         nodes[0].node.send_payment(&route, random_payment_hash, &Some(our_payment_secret)).unwrap();
8810         handle_unknown_invalid_payment_data!();
8811         expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
8812
8813         // Send a payment with a random payment hash and random payment secret
8814         nodes[0].node.send_payment(&route, random_payment_hash, &Some(random_payment_secret)).unwrap();
8815         handle_unknown_invalid_payment_data!();
8816         expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
8817 }
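
// A worked sketch of the 12-byte expected_error_data used above, assuming the BOLT 4
// incorrect_or_unknown_payment_details layout of a big-endian u64 htlc_msat followed by a
// big-endian u32 block height: 100_000 msat is 0x0186a0, and the height is CHAN_CONFIRM_DEPTH.
#[allow(dead_code)]
fn incorrect_payment_details_error_data(htlc_msat: u64, best_block_height: u32) -> [u8; 12] {
        let mut data = [0u8; 12];
        data[..8].copy_from_slice(&htlc_msat.to_be_bytes());
        data[8..].copy_from_slice(&best_block_height.to_be_bytes());
        data
}
// e.g. incorrect_payment_details_error_data(100_000, CHAN_CONFIRM_DEPTH) yields
// [0, 0, 0, 0, 0, 1, 0x86, 0xa0, 0, 0, 0, CHAN_CONFIRM_DEPTH as u8].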
8818
8819 #[test]
8820 fn test_update_err_monitor_lockdown() {
8821         // Our monitor will lock updates of the local commitment transaction once a broadcast condition
8822         // has been fulfilled (either a force-close from Channel or a block height requiring an HTLC-
8823         // timeout). Trying to update the monitor after lockdown should return a ChannelMonitorUpdateErr.
8824         //
8825         // This scenario may happen in a watchtower setup, where the watchtower processes a block
8826         // triggering an HTLC-timeout while a slow-block-processing ChannelManager receives a locally
8827         // signed commitment at the same time.
8828
8829         let chanmon_cfgs = create_chanmon_cfgs(2);
8830         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8831         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8832         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8833
8834         // Create some initial channel
8835         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
8836         let outpoint = OutPoint { txid: chan_1.3.txid(), index: 0 };
8837
8838         // Rebalance the network to generate an HTLC in each direction
8839         send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
8840
8841         // Route an HTLC from node 0 to node 1 (but don't settle it)
8842         let preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
8843
8844         // Copy the ChainMonitor to simulate a watchtower and advance node 0's block height until its ChannelMonitor times out the HTLC on-chain
8845         let chain_source = test_utils::TestChainSource::new(Network::Testnet);
8846         let logger = test_utils::TestLogger::with_id(format!("node {}", 0));
8847         let persister = test_utils::TestPersister::new();
8848         let watchtower = {
8849                 let monitors = nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap();
8850                 let monitor = monitors.get(&outpoint).unwrap();
8851                 let mut w = test_utils::TestVecWriter(Vec::new());
8852                 monitor.write(&mut w).unwrap();
8853                 let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
8854                                 &mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
8855                 assert!(new_monitor == *monitor);
8856                 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
8857                 assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
8858                 watchtower
8859         };
8860         let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
8861         // Make the tx_broadcaster aware of enough blocks that it doesn't think we're violating
8862         // transaction lock time requirements here.
8863         chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize(200, (header, 0));
8864         watchtower.chain_monitor.block_connected(&Block { header, txdata: vec![] }, 200);
8865
8866         // Try to update ChannelMonitor
8867         assert!(nodes[1].node.claim_funds(preimage));
8868         check_added_monitors!(nodes[1], 1);
8869         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
8870         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
8871         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
8872         if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2) {
8873                 if let Ok((_, _, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].fee_estimator, &node_cfgs[0].logger) {
8874                         if let Err(_) =  watchtower.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
8875                         if let Ok(_) = nodes[0].chain_monitor.update_channel(outpoint, update) {} else { assert!(false); }
8876                 } else { assert!(false); }
8877         } else { assert!(false); };
8878         // Our local monitor is in-sync and hasn't yet processed the timeout
8879         check_added_monitors!(nodes[0], 1);
8880         let events = nodes[0].node.get_and_clear_pending_events();
8881         assert_eq!(events.len(), 1);
8882 }
8883
8884 #[test]
8885 fn test_concurrent_monitor_claim() {
8886         // Watchtower Alice receives a block and broadcasts state N. The channel then advances to state
8887         // N+1, which is sent to both watchtowers: Bob accepts it, then receives a block and broadcasts
8888         // the latest state N+1. Alice rejects state N+1, but Bob has already broadcast it and it
8889         // confirms. Alice then claims the HTLC output from state N+1.
8890
8891         let chanmon_cfgs = create_chanmon_cfgs(2);
8892         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8893         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8894         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8895
8896         // Create some initial channel
8897         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
8898         let outpoint = OutPoint { txid: chan_1.3.txid(), index: 0 };
8899
8900         // Rebalance the network to generate an HTLC in each direction
8901         send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
8902
8903         // Route an HTLC from node 0 to node 1 (but don't settle it)
8904         route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
8905
8906         // Copy the ChainMonitor to simulate watchtower Alice and advance the block height until her ChannelMonitor times out the HTLC on-chain
8907         let chain_source = test_utils::TestChainSource::new(Network::Testnet);
8908         let logger = test_utils::TestLogger::with_id(format!("node {}", "Alice"));
8909         let persister = test_utils::TestPersister::new();
8910         let watchtower_alice = {
8911                 let monitors = nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap();
8912                 let monitor = monitors.get(&outpoint).unwrap();
8913                 let mut w = test_utils::TestVecWriter(Vec::new());
8914                 monitor.write(&mut w).unwrap();
8915                 let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
8916                                 &mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
8917                 assert!(new_monitor == *monitor);
8918                 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
8919                 assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
8920                 watchtower
8921         };
8922         let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
8923         // Make the tx_broadcaster aware of enough blocks that it doesn't think we're violating
8924         // transaction lock time requirements here.
8925         chanmon_cfgs[0].tx_broadcaster.blocks.lock().unwrap().resize((CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS) as usize, (header, 0));
8926         watchtower_alice.chain_monitor.block_connected(&Block { header, txdata: vec![] }, CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
8927
8928         // Watchtower Alice should have broadcast a commitment/HTLC-timeout
8929         {
8930                 let mut txn = chanmon_cfgs[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
8931                 assert_eq!(txn.len(), 2);
8932                 txn.clear();
8933         }
8934
8935         // Copy ChainMonitor to simulate watchtower Bob and make it receive a commitment update first.
8936         let chain_source = test_utils::TestChainSource::new(Network::Testnet);
8937         let logger = test_utils::TestLogger::with_id(format!("node {}", "Bob"));
8938         let persister = test_utils::TestPersister::new();
8939         let watchtower_bob = {
8940                 let monitors = nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap();
8941                 let monitor = monitors.get(&outpoint).unwrap();
8942                 let mut w = test_utils::TestVecWriter(Vec::new());
8943                 monitor.write(&mut w).unwrap();
8944                 let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
8945                                 &mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
8946                 assert!(new_monitor == *monitor);
8947                 let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
8948                 assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
8949                 watchtower
8950         };
8951         let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
8952         watchtower_bob.chain_monitor.block_connected(&Block { header, txdata: vec![] }, CHAN_CONFIRM_DEPTH + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
8953
8954         // Route another payment to generate another update while the previous HTLC is still pending
8955         let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[0]);
8956         {
8957                 let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
8958                 let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 3000000 , TEST_FINAL_CLTV, &logger).unwrap();
8959                 nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
8960         }
8961         check_added_monitors!(nodes[1], 1);
8962
8963         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
8964         assert_eq!(updates.update_add_htlcs.len(), 1);
8965         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]);
8966         if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2) {
8967                 if let Ok((_, _, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].fee_estimator, &node_cfgs[0].logger) {
8968                         // Watchtower Alice should already have seen the block and reject the update
8969                         if let Err(_) =  watchtower_alice.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
8970                         if let Ok(_) = watchtower_bob.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
8971                         if let Ok(_) = nodes[0].chain_monitor.update_channel(outpoint, update) {} else { assert!(false); }
8972                 } else { assert!(false); }
8973         } else { assert!(false); };
8974         // Our local monitor is in-sync and hasn't yet processed the timeout
8975         check_added_monitors!(nodes[0], 1);
8976
8977         // Provide one more block to watchtower Bob, expect broadcast of commitment and HTLC-Timeout
8978         let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
8979         watchtower_bob.chain_monitor.block_connected(&Block { header, txdata: vec![] }, CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
8980
8981         // Watchtower Bob should have broadcast a commitment/HTLC-timeout
8982         let bob_state_y;
8983         {
8984                 let mut txn = chanmon_cfgs[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
8985                 assert_eq!(txn.len(), 2);
8986                 bob_state_y = txn[0].clone();
8987                 txn.clear();
8988         };
8989
8990         // We confirm Bob's state Y on Alice; she should broadcast an HTLC-timeout
8991         let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
8992         watchtower_alice.chain_monitor.block_connected(&Block { header, txdata: vec![bob_state_y.clone()] }, CHAN_CONFIRM_DEPTH + 2 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
8993         {
8994                 let htlc_txn = chanmon_cfgs[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
8995                 // We broadcast the transaction twice, once due to the HTLC-timeout and once due to
8996                 // the on-chain detection of the HTLC output
8997                 assert_eq!(htlc_txn.len(), 2);
8998                 check_spends!(htlc_txn[0], bob_state_y);
8999                 check_spends!(htlc_txn[1], bob_state_y);
9000         }
9001 }
9002
9003 #[test]
9004 fn test_pre_lockin_no_chan_closed_update() {
9005         // Test that if a peer closes a channel in response to a funding_created message we don't
9006         // generate a channel update (as the channel cannot appear on chain without a funding_signed
9007         // message).
9008         //
9009         // Doing so would imply a channel monitor update before the initial channel monitor
9010         // registration, violating our API guarantees.
9011         //
9012         // Previously, full_stack_target managed to hit this case by opening then closing a channel,
9013         // then opening a second channel with the same funding output as the first (which is not
9014         // rejected because the first channel does not exist in the ChannelManager) and closing it
9015         // before receiving funding_signed.
9016         let chanmon_cfgs = create_chanmon_cfgs(2);
9017         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9018         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9019         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9020
9021         // Create an initial channel
9022         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
9023         let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9024         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_chan_msg);
9025         let accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
9026         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_chan_msg);
9027
9028         // Move the first channel through the funding flow...
9029         let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], 100000, 42);
9030
9031         nodes[0].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
9032         check_added_monitors!(nodes[0], 0);
9033
9034         let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9035         let channel_id = ::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
9036         nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
9037         assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
9038 }
9039
9040 #[test]
9041 fn test_htlc_no_detection() {
9042         // This test is a mutation to underscore the detection-logic bug we had
9043         // before #653. The routed HTLC value is above the remaining balance, thus
9044         // inverting the HTLC and `to_remote` outputs. The HTLC comes second and
9045         // wouldn't be seen by the pre-#653 detection, as we were enumerate()'ing
9046         // over a watched-outputs vector (Vec<TxOut>), implicitly relying on the
9047         // output ordering for correct filtering of spending children.
9048
9049         let chanmon_cfgs = create_chanmon_cfgs(2);
9050         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9051         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9052         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9053
9054         // Create some initial channels
9055         let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
9056
9057         send_payment(&nodes[0], &vec!(&nodes[1])[..], 1_000_000);
9058         let (_, our_payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 2_000_000);
9059         let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
9060         assert_eq!(local_txn[0].input.len(), 1);
9061         assert_eq!(local_txn[0].output.len(), 3);
9062         check_spends!(local_txn[0], chan_1.3);
9063
9064         // Timeout HTLC on A's chain and so it can generate a HTLC-Timeout tx
9065         let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
9066         connect_block(&nodes[0], &Block { header, txdata: vec![local_txn[0].clone()] });
9067         // We deliberately connect the local tx twice, as this should provoke a failure when running
9068         // this test without the #653 fix.
9069         chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &Block { header, txdata: vec![local_txn[0].clone()] }, nodes[0].best_block_info().1 + 1);
9070         check_closed_broadcast!(nodes[0], true);
9071         check_added_monitors!(nodes[0], 1);
9072         connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1);
9073
9074         let htlc_timeout = {
9075                 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
9076                 assert_eq!(node_txn[1].input.len(), 1);
9077                 assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
9078                 check_spends!(node_txn[1], local_txn[0]);
9079                 node_txn[1].clone()
9080         };
9081
9082         let header_201 = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
9083         connect_block(&nodes[0], &Block { header: header_201, txdata: vec![htlc_timeout.clone()] });
9084         connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
9085         expect_payment_failed!(nodes[0], our_payment_hash, true);
9086 }
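
// A minimal sketch of the bug class described at the top of test_htlc_no_detection (hypothetical
// type and helper, not rust-lightning code): matching a spending input against watched outputs by
// their position in a Vec breaks as soon as the output ordering changes, while matching on the
// recorded output index does not.
#[allow(dead_code)]
struct WatchedOutputSketch { vout: u32 }
#[allow(dead_code)]
fn input_spends_watched_output(spent_vout: u32, watched: &[WatchedOutputSketch]) -> bool {
        // Compare against the vout recorded for each watched output, not its enumerate() index.
        watched.iter().any(|w| w.vout == spent_vout)
}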
9087
9088 fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain_before_fulfill: bool) {
9089         // If we route an HTLC, then learn the HTLC's preimage after the upstream channel has been
9090         // force-closed, we must claim that HTLC on-chain. (Given an HTLC forwarded from Alice --> Bob -->
9091         // Carol, Alice would be the upstream node, and Carol the downstream.)
9092         //
9093         // Steps of the test:
9094         // 1) Alice sends a HTLC to Carol through Bob.
9095         // 2) Carol doesn't settle the HTLC.
9096         // 3) If broadcast_alice is true, Alice force-closes her channel with Bob. Else Bob force closes.
9097         // Steps 4 and 5 may be reordered depending on go_onchain_before_fulfill.
9098         // 4) Bob sees Alice's commitment on his chain (or vice versa). An offered output is present
9099         //    but can't be claimed, as Bob doesn't yet know the preimage.
9100         // 5) Carol releases the preimage to Bob off-chain.
9101         // 6) Bob claims the offered output on the broadcasted commitment.
9102         let chanmon_cfgs = create_chanmon_cfgs(3);
9103         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
9104         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
9105         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
9106
9107         // Create some initial channels
9108         let chan_ab = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
9109         create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 10001, InitFeatures::known(), InitFeatures::known());
9110
9111         // Steps (1) and (2):
9112         // Send an HTLC Alice --> Bob --> Carol, but Carol doesn't settle the HTLC back.
9113         let (payment_preimage, _payment_hash, _payment_secret) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3_000_000);
9114
9115         // Check that Alice's commitment transaction now contains an output for this HTLC.
9116         let alice_txn = get_local_commitment_txn!(nodes[0], chan_ab.2);
9117         check_spends!(alice_txn[0], chan_ab.3);
9118         assert_eq!(alice_txn[0].output.len(), 2);
9119         check_spends!(alice_txn[1], alice_txn[0]); // 2nd transaction is a non-final HTLC-timeout
9120         assert_eq!(alice_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
9121         assert_eq!(alice_txn.len(), 2);
9122
9123         // Steps (3) and (4):
9124         // If `go_onchain_before_fulfill`, broadcast the relevant commitment transaction and check that Bob
9125         // responds by (1) broadcasting a channel update and (2) adding a new ChannelMonitor.
9126         let mut force_closing_node = 0; // Alice force-closes
9127         if !broadcast_alice { force_closing_node = 1; } // Bob force-closes
9128         nodes[force_closing_node].node.force_close_channel(&chan_ab.2).unwrap();
9129         check_closed_broadcast!(nodes[force_closing_node], true);
9130         check_added_monitors!(nodes[force_closing_node], 1);
9131         if go_onchain_before_fulfill {
9132                 let txn_to_broadcast = match broadcast_alice {
9133                         true => alice_txn.clone(),
9134                         false => get_local_commitment_txn!(nodes[1], chan_ab.2)
9135                 };
9136                 let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
9137                 connect_block(&nodes[1], &Block { header, txdata: vec![txn_to_broadcast[0].clone()]});
9138                 let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
9139                 if broadcast_alice {
9140                         check_closed_broadcast!(nodes[1], true);
9141                         check_added_monitors!(nodes[1], 1);
9142                 }
9143                 assert_eq!(bob_txn.len(), 1);
9144                 check_spends!(bob_txn[0], chan_ab.3);
9145         }
9146
9147         // Step (5):
9148         // Carol then claims the funds and sends an update_fulfill message to Bob, and they go through the
9149         // process of removing the HTLC from their commitment transactions.
9150         assert!(nodes[2].node.claim_funds(payment_preimage));
9151         check_added_monitors!(nodes[2], 1);
9152         let carol_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
9153         assert!(carol_updates.update_add_htlcs.is_empty());
9154         assert!(carol_updates.update_fail_htlcs.is_empty());
9155         assert!(carol_updates.update_fail_malformed_htlcs.is_empty());
9156         assert!(carol_updates.update_fee.is_none());
9157         assert_eq!(carol_updates.update_fulfill_htlcs.len(), 1);
9158
9159         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &carol_updates.update_fulfill_htlcs[0]);
9160         // If Alice broadcasted but Bob doesn't know yet, here he prepares to tell her about the preimage.
9161         if !go_onchain_before_fulfill && broadcast_alice {
9162                 let events = nodes[1].node.get_and_clear_pending_msg_events();
9163                 assert_eq!(events.len(), 1);
9164                 match events[0] {
9165                         MessageSendEvent::UpdateHTLCs { ref node_id, .. } => {
9166                                 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
9167                         },
9168                         _ => panic!("Unexpected event"),
9169                 };
9170         }
9171         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &carol_updates.commitment_signed);
9172         // One monitor update for the preimage to update the Bob<->Alice channel, and one monitor
9173         // update for Carol<->Bob's updated commitment transaction info.
9174         check_added_monitors!(nodes[1], 2);
9175
9176         let events = nodes[1].node.get_and_clear_pending_msg_events();
9177         assert_eq!(events.len(), 2);
9178         let bob_revocation = match events[0] {
9179                 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
9180                         assert_eq!(*node_id, nodes[2].node.get_our_node_id());
9181                         (*msg).clone()
9182                 },
9183                 _ => panic!("Unexpected event"),
9184         };
9185         let bob_updates = match events[1] {
9186                 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
9187                         assert_eq!(*node_id, nodes[2].node.get_our_node_id());
9188                         (*updates).clone()
9189                 },
9190                 _ => panic!("Unexpected event"),
9191         };
9192
9193         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bob_revocation);
9194         check_added_monitors!(nodes[2], 1);
9195         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bob_updates.commitment_signed);
9196         check_added_monitors!(nodes[2], 1);
9197
9198         let events = nodes[2].node.get_and_clear_pending_msg_events();
9199         assert_eq!(events.len(), 1);
9200         let carol_revocation = match events[0] {
9201                 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
9202                         assert_eq!(*node_id, nodes[1].node.get_our_node_id());
9203                         (*msg).clone()
9204                 },
9205                 _ => panic!("Unexpected event"),
9206         };
9207         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &carol_revocation);
9208         check_added_monitors!(nodes[1], 1);
9209
9210         // If this test requires the force-closed channel to not be on-chain until after the fulfill,
9211         // here's where we put said channel's commitment tx on-chain.
9212         let mut txn_to_broadcast = alice_txn.clone();
9213         if !broadcast_alice { txn_to_broadcast = get_local_commitment_txn!(nodes[1], chan_ab.2); }
9214         if !go_onchain_before_fulfill {
9215                 let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
9216                 connect_block(&nodes[1], &Block { header, txdata: vec![txn_to_broadcast[0].clone()]});
9217                 // If Bob was the one to force-close, he will have already passed these checks earlier.
9218                 if broadcast_alice {
9219                         check_closed_broadcast!(nodes[1], true);
9220                         check_added_monitors!(nodes[1], 1);
9221                 }
9222                 let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
9223                 if broadcast_alice {
9224                         // In `connect_block()`, the ChainMonitor and ChannelManager are separately notified about a
9225                         // new block being connected. The ChannelManager being notified triggers a monitor update,
9226                         // which triggers broadcasting our commitment tx and an HTLC-claiming tx. The ChainMonitor
9227                         // being notified triggers the HTLC-claiming tx redundantly, resulting in 3 total txs being
9228                         // broadcasted.
9229                         assert_eq!(bob_txn.len(), 3);
9230                         check_spends!(bob_txn[1], chan_ab.3);
9231                 } else {
9232                         assert_eq!(bob_txn.len(), 2);
9233                         check_spends!(bob_txn[0], chan_ab.3);
9234                 }
9235         }
9236
9237         // Step (6):
9238         // Finally, check that Bob broadcasted a preimage-claiming transaction for the HTLC output on the
9239         // broadcasted commitment transaction.
9240         {
9241                 let bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
9242                 if go_onchain_before_fulfill {
9243                         // Bob should now have an extra broadcasted tx, for the preimage-claiming transaction.
9244                         assert_eq!(bob_txn.len(), 2);
9245                 }
9246                 let script_weight = match broadcast_alice {
9247                         true => OFFERED_HTLC_SCRIPT_WEIGHT,
9248                         false => ACCEPTED_HTLC_SCRIPT_WEIGHT
9249                 };
9250                 // If Alice force-closed and Bob didn't receive her commitment transaction until after he
9251                 // received Carol's fulfill, he broadcasts the HTLC-output-claiming transaction first. Else if
9252                 // Bob force closed or if he found out about Alice's commitment tx before receiving Carol's
9253                 // fulfill, then he broadcasts the HTLC-output-claiming transaction second.
9254                 if broadcast_alice && !go_onchain_before_fulfill {
9255                         check_spends!(bob_txn[0], txn_to_broadcast[0]);
9256                         assert_eq!(bob_txn[0].input[0].witness.last().unwrap().len(), script_weight);
9257                 } else {
9258                         check_spends!(bob_txn[1], txn_to_broadcast[0]);
9259                         assert_eq!(bob_txn[1].input[0].witness.last().unwrap().len(), script_weight);
9260                 }
9261         }
9262 }
9263
9264 #[test]
9265 fn test_onchain_htlc_settlement_after_close() {
9266         do_test_onchain_htlc_settlement_after_close(true, true);
9267         do_test_onchain_htlc_settlement_after_close(false, true); // Technically redundant, but may as well
9268         do_test_onchain_htlc_settlement_after_close(true, false);
9269         do_test_onchain_htlc_settlement_after_close(false, false);
9270 }
9271
9272 #[test]
9273 fn test_duplicate_chan_id() {
9274         // Test that if a given peer tries to open a channel with the same channel_id as one that is
9275         // already open we reject it and keep the old channel.
9276         //
9277         // Previously, full_stack_target managed to figure out that if you tried to open two channels
9278         // with the same funding output (ie post-funding channel_id), we'd create a monitor update for
9279         // the existing channel when we detect the duplicate new channel, screwing up our monitor
9280         // updating logic for the existing channel.
9281         let chanmon_cfgs = create_chanmon_cfgs(2);
9282         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
9283         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
9284         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
9285
9286         // Create an initial channel
9287         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
9288         let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9289         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_chan_msg);
9290         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
9291
9292         // Try to create a second channel with the same temporary_channel_id as the first and check
9293         // that it is rejected.
9294         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_chan_msg);
9295         {
9296                 let events = nodes[1].node.get_and_clear_pending_msg_events();
9297                 assert_eq!(events.len(), 1);
9298                 match events[0] {
9299                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
9300                                 // Technically, at this point, nodes[1] would be justified in thinking both the
9301                                 // first (valid) and second (invalid) channels are closed, given they both have
9302                                 // the same non-temporary channel_id. However, currently we do not, so we just
9303                                 // move forward with it.
9304                                 assert_eq!(msg.channel_id, open_chan_msg.temporary_channel_id);
9305                                 assert_eq!(node_id, nodes[0].node.get_our_node_id());
9306                         },
9307                         _ => panic!("Unexpected event"),
9308                 }
9309         }
9310
9311         // Move the first channel through the funding flow...
9312         let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], 100000, 42);
9313
9314         nodes[0].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
9315         check_added_monitors!(nodes[0], 0);
9316
9317         let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9318         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
9319         {
9320                 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
9321                 assert_eq!(added_monitors.len(), 1);
9322                 assert_eq!(added_monitors[0].0, funding_output);
9323                 added_monitors.clear();
9324         }
9325         let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
9326
9327         let funding_outpoint = ::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index };
9328         let channel_id = funding_outpoint.to_channel_id();
9329
9330         // Now we have the first channel past funding_created (ie it has a txid-based channel_id, not a
9331         // temporary one).
9332
9333         // First try to open a second channel with a temporary channel id equal to the txid-based one.
9334         // Technically this is allowed by the spec, but we don't support it and there's little reason
9335         // to. Still, it shouldn't cause any other issues.
9336         open_chan_msg.temporary_channel_id = channel_id;
9337         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_chan_msg);
9338         {
9339                 let events = nodes[1].node.get_and_clear_pending_msg_events();
9340                 assert_eq!(events.len(), 1);
9341                 match events[0] {
9342                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
9343                                 // Technically, at this point, nodes[1] would be justified in thinking both
9344                                 // channels are closed, but currently we do not, so we just move forward with it.
9345                                 assert_eq!(msg.channel_id, open_chan_msg.temporary_channel_id);
9346                                 assert_eq!(node_id, nodes[0].node.get_our_node_id());
9347                         },
9348                         _ => panic!("Unexpected event"),
9349                 }
9350         }
9351
9352         // Now try to create a second channel which has a duplicate funding output.
9353         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
9354         let open_chan_2_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
9355         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_chan_2_msg);
9356         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
9357         create_funding_transaction(&nodes[0], 100000, 42); // Get and check the FundingGenerationReady event
9358
9359         let funding_created = {
9360                 let mut a_channel_lock = nodes[0].node.channel_state.lock().unwrap();
9361                 let mut as_chan = a_channel_lock.by_id.get_mut(&open_chan_2_msg.temporary_channel_id).unwrap();
9362                 let logger = test_utils::TestLogger::new();
9363                 as_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap()
9364         };
9365         check_added_monitors!(nodes[0], 0);
9366         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
9367         // At this point we'll try to add a duplicate channel monitor, which will be rejected, but
9368         // still needs to be cleared here.
9369         check_added_monitors!(nodes[1], 1);
9370
9371         // ...still, nodes[1] will reject the duplicate channel.
9372         {
9373                 let events = nodes[1].node.get_and_clear_pending_msg_events();
9374                 assert_eq!(events.len(), 1);
9375                 match events[0] {
9376                         MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
9377                                 // Technically, at this point, nodes[1] would be justified in thinking both
9378                                 // channels are closed, but currently we do not, so we just move forward with it.
9379                                 assert_eq!(msg.channel_id, channel_id);
9380                                 assert_eq!(node_id, nodes[0].node.get_our_node_id());
9381                         },
9382                         _ => panic!("Unexpected event"),
9383                 }
9384         }
9385
9386         // finally, finish creating the original channel and send a payment over it to make sure
9387         // everything is functional.
9388         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
9389         {
9390                 let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
9391                 assert_eq!(added_monitors.len(), 1);
9392                 assert_eq!(added_monitors[0].0, funding_output);
9393                 added_monitors.clear();
9394         }
9395
9396         let events_4 = nodes[0].node.get_and_clear_pending_events();
9397         assert_eq!(events_4.len(), 0);
9398         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
9399         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].txid(), funding_output.txid);
9400
9401         let (funding_locked, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
9402         let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked);
9403         update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
9404         send_payment(&nodes[0], &[&nodes[1]], 8000000);
9405 }
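
// A minimal sketch of how the funding-based channel_id used above relates to the funding
// outpoint. Per BOLT 2, the big-endian funding output index is XORed into the last two bytes of
// the funding txid; the exact txid byte order is an assumption here, and OutPoint::to_channel_id
// is the real helper used by the test.
#[allow(dead_code)]
fn sketch_funding_channel_id(funding_txid_bytes: [u8; 32], funding_output_index: u16) -> [u8; 32] {
        let mut channel_id = funding_txid_bytes;
        channel_id[30] ^= (funding_output_index >> 8) as u8;
        channel_id[31] ^= (funding_output_index & 0xff) as u8;
        channel_id
}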
9406
9407 #[test]
9408 fn test_error_chans_closed() {
9409         // Test that we properly handle error messages, closing appropriate channels.
9410         //
9411         // Prior to #787 we'd allow a peer to make us force-close a channel we had with a different
9412         // peer. The "real" fix for that is to index channels by peer ID; however, in the meantime
9413         // we can test various edge cases around it to ensure we don't regress.
9414         let chanmon_cfgs = create_chanmon_cfgs(3);
9415         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
9416         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
9417         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
9418
        // Create some initial channels
        let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
        let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
        let chan_3 = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100000, 10001, InitFeatures::known(), InitFeatures::known());

        assert_eq!(nodes[0].node.list_usable_channels().len(), 3);
        assert_eq!(nodes[1].node.list_usable_channels().len(), 2);
        assert_eq!(nodes[2].node.list_usable_channels().len(), 1);

        // Closing a channel from a different peer has no effect
        nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_3.2, data: "ERR".to_owned() });
        assert_eq!(nodes[0].node.list_usable_channels().len(), 3);

        // Closing one channel doesn't impact others
        nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
        check_added_monitors!(nodes[0], 1);
        check_closed_broadcast!(nodes[0], false);
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
        assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
        assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
        assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_3.2);

        // A null channel ID should close all channels
        let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
        nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: [0; 32], data: "ERR".to_owned() });
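        // Both remaining channels with nodes[1] (chan_1 and chan_4) are force-closed: we expect one
        // monitor update per closed channel and a broadcast channel_update marking each channel as
        // disabled (flag bit 0x2).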
        check_added_monitors!(nodes[0], 2);
        let events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 2);
        match events[0] {
                MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
                        assert_eq!(msg.contents.flags & 2, 2);
                },
                _ => panic!("Unexpected event"),
        }
        match events[1] {
                MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
                        assert_eq!(msg.contents.flags & 2, 2);
                },
                _ => panic!("Unexpected event"),
        }
        // Note that at this point users of a standard PeerHandler will end up calling
        // peer_disconnected with no_connection_possible set to false, duplicating the
        // close-all-channels logic. That's OK, we don't want to end up not force-closing channels for
        // users with their own peer handling logic. We duplicate the call here, however.
        assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
        assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);

        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true);
        assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
        assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
}

#[test]
fn test_invalid_funding_tx() {
        // Test that we properly handle invalid funding transactions sent to us from a peer.
        //
        // Previously, all other major lightning implementations had failed to properly sanitize
        // funding transactions from their counterparties, leading to a multi-implementation critical
        // security vulnerability (though we always sanitized properly, we've previously had
        // un-released crashes in the sanitization process).
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

        nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 10_000, 42, None).unwrap();
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));

        let (temporary_channel_id, mut tx, _) = create_funding_transaction(&nodes[0], 100_000, 42);
        for output in tx.output.iter_mut() {
                // Make the confirmed funding transaction have a bogus script_pubkey
                output.script_pubkey = bitcoin::Script::new();
        }

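        // Hand the bogus transaction to the channel manager via the unchecked variant so nodes[0]'s
        // own funding-output checks don't reject it before it ever reaches nodes[1].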
        nodes[0].node.funding_transaction_generated_unchecked(&temporary_channel_id, tx.clone(), 0).unwrap();
        nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
        check_added_monitors!(nodes[1], 1);

        nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
        check_added_monitors!(nodes[0], 1);

        let events_1 = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events_1.len(), 0);

        assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx);
        nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();

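        // Confirming the bogus funding transaction on nodes[1]'s chain should make it reject the
        // channel and send an error back to nodes[0].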
        confirm_transaction_at(&nodes[1], &tx, 1);
        check_added_monitors!(nodes[1], 1);
        let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events_2.len(), 1);
        if let MessageSendEvent::HandleError { node_id, action } = &events_2[0] {
                assert_eq!(*node_id, nodes[0].node.get_our_node_id());
                if let msgs::ErrorAction::SendErrorMessage { msg } = action {
                        assert_eq!(msg.data, "funding tx had wrong script/value or output index");
                } else { panic!(); }
        } else { panic!(); }
        assert_eq!(nodes[1].node.list_channels().len(), 0);
}

fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_timelock: bool) {
        // In the first version of the chain::Confirm interface, after a refactor was made to not
        // broadcast CSV-locked transactions until their CSV lock is up, we wouldn't reliably broadcast
        // transactions after a `transactions_confirmed` call. Specifically, if the chain, provided via
        // `best_block_updated`, is at height N, and a transaction output which we wish to spend at
        // height N-1 (due to a CSV to height N-1) is provided at height N, we will not broadcast the
        // spending transaction until height N+1 (or greater). This was due to the way
        // `ChannelMonitor::transactions_confirmed` worked, only checking if we should broadcast a
        // spending transaction at the height the input transaction was confirmed at, not whether we
        // should broadcast a spending transaction at the current height.
        // A second, similar, issue involved failing HTLCs backwards - because we only provided the
        // height at which transactions were confirmed to `OnchainTx::update_claims_view`, it wasn't
        // aware that the anti-reorg-delay had, in fact, already expired, and waited to fail-backwards
        // until we learned about an additional block.
        //
        // As an additional check, if `test_height_before_timelock` is set, we instead test that we
        // aren't broadcasting transactions too early (i.e. not broadcasting them at all).
        let chanmon_cfgs = create_chanmon_cfgs(3);
        let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
        *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;

        create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
        let (chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
        let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
        nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
        nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);

        nodes[1].node.force_close_channel(&channel_id).unwrap();
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
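        // nodes[1] should have broadcast its commitment transaction for the force-closed channel.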
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(node_txn.len(), 1);

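        // Record the height at which the commitment transaction will be reported as confirmed, then
        // (unless we're testing the too-early case) walk the chain well past the output's timelock
        // before actually providing the confirmation at that older height.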
        let conf_height = nodes[1].best_block_info().1;
        if !test_height_before_timelock {
                connect_blocks(&nodes[1], 24 * 6);
        }
        nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
                &nodes[1].get_block_header(conf_height), &[(0, &node_txn[0])], conf_height);
        if test_height_before_timelock {
                // If we confirmed the close transaction, but timelocks have not yet expired, we should not
                // generate any events or broadcast any transactions
                assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
                assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
        } else {
                // We should re-broadcast our commitment transaction and also broadcast an
                // HTLC-Timeout transaction spending it, as its timelock has now expired.
                let spending_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
                assert_eq!(spending_txn.len(), 2);
                assert_eq!(spending_txn[0], node_txn[0]);
                check_spends!(spending_txn[1], node_txn[0]);
                // We should also generate a SpendableOutputs event with the to_self output (as its
                // timelock is up).
                let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
                assert_eq!(descriptor_spend_txn.len(), 1);

                // If we also discover that the HTLC-Timeout transaction was confirmed some time ago, we
                // should immediately fail-backwards the HTLC to the previous hop, without waiting for an
                // additional block built on top of the current chain.
                nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
                        &nodes[1].get_block_header(conf_height + 1), &[(0, &spending_txn[1])], conf_height + 1);
                expect_pending_htlcs_forwardable!(nodes[1]);
                check_added_monitors!(nodes[1], 1);

                let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
                assert!(updates.update_add_htlcs.is_empty());
                assert!(updates.update_fulfill_htlcs.is_empty());
                assert_eq!(updates.update_fail_htlcs.len(), 1);
                assert!(updates.update_fail_malformed_htlcs.is_empty());
                assert!(updates.update_fee.is_none());
                nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
                commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
                expect_payment_failed!(nodes[0], payment_hash, false);
                expect_payment_failure_chan_update!(nodes[0], chan_announce.contents.short_channel_id, true);
        }
}

#[test]
fn test_tx_confirmed_skipping_blocks_immediate_broadcast() {
        do_test_tx_confirmed_skipping_blocks_immediate_broadcast(false);
        do_test_tx_confirmed_skipping_blocks_immediate_broadcast(true);
}

#[test]
fn test_keysend_payments_to_public_node() {
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

        let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
        let network_graph = nodes[0].net_graph_msg_handler.network_graph.read().unwrap();
        let payer_pubkey = nodes[0].node.get_our_node_id();
        let payee_pubkey = nodes[1].node.get_our_node_id();
        let route = get_route(&payer_pubkey, &network_graph, &payee_pubkey, None,
                        None, &vec![], 10000, 40,
                        nodes[0].logger).unwrap();

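        // Keysend: we pick the payment preimage ourselves and send a spontaneous payment, so the
        // recipient can claim it without ever having issued an invoice.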
        let test_preimage = PaymentPreimage([42; 32]);
        let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage)).unwrap();
        check_added_monitors!(nodes[0], 1);
        let mut events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
        let event = events.pop().unwrap();
        let path = vec![&nodes[1]];
        pass_along_path(&nodes[0], &path, 10000, payment_hash, None, event, true, Some(test_preimage));
        claim_payment(&nodes[0], &path, test_preimage);
}

#[test]
fn test_keysend_payments_to_private_node() {
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

        let payer_pubkey = nodes[0].node.get_our_node_id();
        let payee_pubkey = nodes[1].node.get_our_node_id();
        nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() });
        nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });

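        // The channel below is never announced to the network graph, so the keysend route has to be
        // built from nodes[0]'s usable channels passed in as first hops.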
        let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
        let network_graph = nodes[0].net_graph_msg_handler.network_graph.read().unwrap();
        let first_hops = nodes[0].node.list_usable_channels();
        let route = get_keysend_route(&payer_pubkey, &network_graph, &payee_pubkey,
                                Some(&first_hops.iter().collect::<Vec<_>>()), &vec![], 10000, 40,
                                nodes[0].logger).unwrap();

        let test_preimage = PaymentPreimage([42; 32]);
        let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage)).unwrap();
        check_added_monitors!(nodes[0], 1);
        let mut events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
        let event = events.pop().unwrap();
        let path = vec![&nodes[1]];
        pass_along_path(&nodes[0], &path, 10000, payment_hash, None, event, true, Some(test_preimage));
        claim_payment(&nodes[0], &path, test_preimage);
}