git.bitcoin.ninja Git - rust-lightning/commitdiff
-f address Matt/Val's comments
author Antoine Riard <dev@ariard.me>
Thu, 16 Sep 2021 14:46:16 +0000 (10:46 -0400)
committer Antoine Riard <dev@ariard.me>
Fri, 17 Sep 2021 22:43:19 +0000 (18:43 -0400)
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/monitor_tests.rs
lightning/src/ln/onion_route_tests.rs
lightning/src/ln/reorg_tests.rs
lightning/src/util/events.rs
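
In brief: this fixup reworks the test macros expect_pending_htlcs_forwardable! and expect_pending_htlcs_forwardable_ignore! to fetch a node's pending events themselves rather than taking a pre-fetched $events argument, renames the ChannelClosed event field from `err` to `reason`, and adds an expect_pending_htlcs_forwardable_from_events! helper for the few call sites that still slice events manually. A minimal before/after sketch of the macro change (illustrative only; `nodes` is the usual functional-test harness array):

    // Before this commit: every call site fetched and cleared events by hand.
    let events = nodes[1].node.get_and_clear_pending_events();
    expect_pending_htlcs_forwardable!(nodes[1], events);

    // After: the macro fetches (and clears) the pending events internally.
    expect_pending_htlcs_forwardable!(nodes[1]);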

diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index 680ff91826c83d2bd192515fef888d3aed36490c..4775f0e31dce9a027625032302ffbde53b62913a 100644
@@ -217,8 +217,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool, persister_fail
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
        commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
 
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
 
        let events_3 = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events_3.len(), 1);
@@ -593,8 +592,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(nodes[0], 1);
 
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
 
        let events_5 = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events_5.len(), 1);
@@ -714,8 +712,7 @@ fn test_monitor_update_fail_cs() {
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &final_raa);
        check_added_monitors!(nodes[1], 1);
 
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
 
        let events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
@@ -773,8 +770,7 @@ fn test_monitor_update_fail_no_rebroadcast() {
        nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(nodes[1], 0);
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
 
        let events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
@@ -855,14 +851,12 @@ fn test_monitor_update_raa_while_paused() {
 
        nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
        check_added_monitors!(nodes[0], 1);
-       let events = nodes[0].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[0], events);
+       expect_pending_htlcs_forwardable!(nodes[0]);
        expect_payment_received!(nodes[0], our_payment_hash_2, our_payment_secret_2, 1000000);
 
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
        check_added_monitors!(nodes[1], 1);
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
        expect_payment_received!(nodes[1], our_payment_hash_1, our_payment_secret_1, 1000000);
 
        claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
@@ -887,8 +881,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 
        // Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA
        assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1));
-       let events = nodes[2].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[2], events);
+       expect_pending_htlcs_forwardable!(nodes[2]);
        check_added_monitors!(nodes[2], 1);
 
        let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
@@ -916,8 +909,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
        commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false);
 
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
        check_added_monitors!(nodes[1], 0);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
@@ -947,8 +939,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 
        // Call forward_pending_htlcs and check that the new HTLC was simply added to the holding cell
        // and not forwarded.
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
        check_added_monitors!(nodes[1], 0);
        assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 
@@ -976,8 +967,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
        let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone();
        nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
        check_added_monitors!(nodes[1], 1);
 
        let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events();
@@ -1102,8 +1092,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
        check_added_monitors!(nodes[2], 1);
        assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
 
-       let events = nodes[2].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[2], events);
+       expect_pending_htlcs_forwardable!(nodes[2]);
 
        let events_6 = nodes[2].node.get_and_clear_pending_events();
        assert_eq!(events_6.len(), 2);
@@ -1117,8 +1106,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
        };
 
        if test_ignore_second_cs {
-               let events = nodes[1].node.get_and_clear_pending_events();
-               expect_pending_htlcs_forwardable!(nodes[1], events);
+               expect_pending_htlcs_forwardable!(nodes[1]);
                check_added_monitors!(nodes[1], 1);
 
                send_event = SendEvent::from_node(&nodes[1]);
@@ -1127,8 +1115,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
                nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]);
                commitment_signed_dance!(nodes[0], nodes[1], send_event.commitment_msg, false);
 
-               let events = nodes[0].node.get_and_clear_pending_events();
-               expect_pending_htlcs_forwardable!(nodes[0], events);
+               expect_pending_htlcs_forwardable!(nodes[0]);
 
                let events_9 = nodes[0].node.get_and_clear_pending_events();
                assert_eq!(events_9.len(), 1);
@@ -1308,8 +1295,7 @@ fn raa_no_response_awaiting_raa_state() {
        // nodes[1] should be AwaitingRAA here!
        check_added_monitors!(nodes[1], 0);
        let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
        expect_payment_received!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
 
        // We send a third payment here, which is somewhat of a redundant test, but the
@@ -1340,8 +1326,7 @@ fn raa_no_response_awaiting_raa_state() {
        // Finally deliver the RAA to nodes[1] which results in a CS response to the last update
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
        check_added_monitors!(nodes[1], 1);
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
        expect_payment_received!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
        let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 
@@ -1354,8 +1339,7 @@ fn raa_no_response_awaiting_raa_state() {
 
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
        check_added_monitors!(nodes[1], 1);
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
        expect_payment_received!(nodes[1], payment_hash_3, payment_secret_3, 1000000);
 
        claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
@@ -1471,8 +1455,7 @@ fn claim_while_disconnected_monitor_update_fail() {
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
        check_added_monitors!(nodes[1], 1);
 
-       let mut events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
        expect_payment_received!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
 
        nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
@@ -1554,8 +1537,7 @@ fn monitor_failed_no_reestablish_response() {
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
        check_added_monitors!(nodes[1], 1);
 
-       let mut events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
        expect_payment_received!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
 
        claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
@@ -1643,8 +1625,7 @@ fn first_message_on_recv_ordering() {
        nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
 
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
        expect_payment_received!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
 
        let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -1657,8 +1638,7 @@ fn first_message_on_recv_ordering() {
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
        check_added_monitors!(nodes[1], 1);
 
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
        expect_payment_received!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
 
        claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
@@ -1740,15 +1720,13 @@ fn test_monitor_update_fail_claim() {
        expect_payment_sent!(nodes[0], payment_preimage_1, events);
 
        // Get the payment forwards, note that they were batched into one commitment update.
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
        check_added_monitors!(nodes[1], 1);
        let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[0]);
        nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[1]);
        commitment_signed_dance!(nodes[0], nodes[1], bs_forward_update.commitment_signed, false);
-       let events = nodes[0].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[0], events);
+       expect_pending_htlcs_forwardable!(nodes[0]);
 
        let events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 2);
@@ -1801,8 +1779,7 @@ fn test_monitor_update_on_pending_forwards() {
 
        let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
        assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1));
-       let events = nodes[2].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[2], events);
+       expect_pending_htlcs_forwardable!(nodes[2]);
        check_added_monitors!(nodes[2], 1);
 
        let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
@@ -1825,8 +1802,7 @@ fn test_monitor_update_on_pending_forwards() {
        commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);
 
        *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
        check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
@@ -1903,8 +1879,7 @@ fn monitor_update_claim_fail_no_response() {
 
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
        check_added_monitors!(nodes[1], 1);
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
        expect_payment_received!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
 
        let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -2168,8 +2143,7 @@ fn test_pending_update_fee_ack_on_reconnect() {
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
        check_added_monitors!(nodes[1], 1);
 
-       let events = nodes[0].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[0], events);
+       expect_pending_htlcs_forwardable!(nodes[0]);
        expect_payment_received!(nodes[0], payment_hash, payment_secret, 1_000_000);
 
        claim_payment(&nodes[1], &[&nodes[0]], payment_preimage);
@@ -2453,15 +2427,13 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
 
        let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
        expect_payment_received!(nodes[1], payment_hash_1, payment_secret_1, 100000);
        check_added_monitors!(nodes[1], 1);
 
        commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false);
 
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
        expect_payment_received!(nodes[1], payment_hash_2, payment_secret_2, 100000);
 
        claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
@@ -2528,8 +2500,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
        };
        if second_fails {
                assert!(nodes[2].node.fail_htlc_backwards(&payment_hash));
-               let events = nodes[2].node.get_and_clear_pending_events();
-               expect_pending_htlcs_forwardable!(nodes[2], events);
+               expect_pending_htlcs_forwardable!(nodes[2]);
                check_added_monitors!(nodes[2], 1);
                get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
        } else {
@@ -2563,8 +2534,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
 
        if second_fails {
                reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
-               let events = nodes[1].node.get_and_clear_pending_events();
-               expect_pending_htlcs_forwardable!(nodes[1], events);
+               expect_pending_htlcs_forwardable!(nodes[1]);
        } else {
                reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
        }
@@ -2572,8 +2542,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
        if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
                nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa.unwrap());
                check_added_monitors!(nodes[1], 1);
-               let events = nodes[1].node.get_and_clear_pending_events();
-               expect_pending_htlcs_forwardable_ignore!(nodes[1], events); // We finally receive the second payment, but don't claim it
+               expect_pending_htlcs_forwardable_ignore!(nodes[1]); // We finally receive the second payment, but don't claim it
 
                bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
                assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index fdbc78583bb2881fa06de46d84004532e7a997ee..7e848f3cfbe2b5f2a967cd6572551bf36e2fd85e 100644
@@ -258,7 +258,7 @@ impl MsgHandleErrInternal {
                                        },
                                },
                        },
-                       chan_id: Some(channel_id),
+                       chan_id: None,
                        shutdown_finish: None,
                }
        }
@@ -837,7 +837,7 @@ macro_rules! handle_error {
                                                });
                                        }
                                        if let Some(channel_id) = chan_id {
-                                               $self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id,  err: ClosureReason::ProcessingError { err: err.err.clone() } });
+                                               $self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id,  reason: ClosureReason::ProcessingError { err: err.err.clone() } });
                                        }
                                }
 
@@ -1447,6 +1447,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                }
        }
 
+       /// `peer_node_id` should be set when we receive a message from a peer, but not set when the user closes, which will be re-exposed as the `ChannelClosed`
+       /// reason.
        fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>, peer_msg: Option<&String>) -> Result<PublicKey, APIError> {
                let mut chan = {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
@@ -1463,12 +1465,10 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                let mut pending_events_lock = self.pending_events.lock().unwrap();
                                if peer_node_id.is_some() {
                                        if let Some(peer_msg) = peer_msg {
-                                               pending_events_lock.push(events::Event::ChannelClosed { channel_id: *channel_id, err: ClosureReason::CounterpartyForceClosed { peer_msg: Some(peer_msg.to_string()) } });
-                                       } else {
-                                               pending_events_lock.push(events::Event::ChannelClosed { channel_id: *channel_id, err: ClosureReason::CounterpartyForceClosed { peer_msg: None } });
+                                               pending_events_lock.push(events::Event::ChannelClosed { channel_id: *channel_id, reason: ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() } });
                                        }
                                } else {
-                                       pending_events_lock.push(events::Event::ChannelClosed { channel_id: *channel_id, err: ClosureReason::HolderForceClosed });
+                                       pending_events_lock.push(events::Event::ChannelClosed { channel_id: *channel_id, reason: ClosureReason::HolderForceClosed });
                                }
                                chan.remove_entry().1
                        } else {
@@ -2435,7 +2435,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                        if let Some(short_id) = channel.get_short_channel_id() {
                                                                                                channel_state.short_to_id.remove(&short_id);
                                                                                        }
-                                                                                       // ChannelClosed event is generated by handle_errors for us.
+                                                                                       // ChannelClosed event is generated by handle_error for us.
                                                                                        Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
                                                                                },
                                                                                ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
@@ -3565,8 +3565,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        msg: update
                                });
                        }
-                       //TODO: split between CounterpartyInitiated/LocallyInitiated
-                       self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: msg.channel_id,  err: ClosureReason::CooperativeClosure });
+                       self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: msg.channel_id,  reason: ClosureReason::CooperativeClosure });
                }
                Ok(())
        }
@@ -3978,7 +3977,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                msg: update
                                                        });
                                                }
-                                               self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(),  err: ClosureReason::CommitmentTxBroadcasted });
+                                               self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(),  reason: ClosureReason::CommitmentTxBroadcasted });
                                                pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                        node_id: chan.get_counterparty_node_id(),
                                                        action: msgs::ErrorAction::SendErrorMessage {
@@ -4514,7 +4513,7 @@ where
                                                        msg: update
                                                });
                                        }
-                                       self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: channel.channel_id(),  err: ClosureReason::CommitmentTxBroadcasted });
+                                       self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: channel.channel_id(),  reason: ClosureReason::CommitmentTxBroadcasted });
                                        pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                node_id: channel.get_counterparty_node_id(),
                                                action: msgs::ErrorAction::SendErrorMessage { msg: e },
@@ -4705,7 +4704,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                                                                msg: update
                                                        });
                                                }
-                                               self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(),  err: ClosureReason::DisconnectedPeer });
+                                               self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(),  reason: ClosureReason::DisconnectedPeer });
                                                false
                                        } else {
                                                true
@@ -4720,7 +4719,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                                                        if let Some(short_id) = chan.get_short_channel_id() {
                                                                short_to_id.remove(&short_id);
                                                        }
-                                                       self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(),  err: ClosureReason::DisconnectedPeer });
+                                                       self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(),  reason: ClosureReason::DisconnectedPeer });
                                                        return false;
                                                } else {
                                                        no_channels_remain = false;
@@ -5679,10 +5678,8 @@ mod tests {
                nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
                check_added_monitors!(nodes[1], 0);
                commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
-               let events = nodes[1].node.get_and_clear_pending_events();
-               expect_pending_htlcs_forwardable!(nodes[1], events);
-               let events = nodes[1].node.get_and_clear_pending_events();
-               expect_pending_htlcs_forwardable!(nodes[1], events);
+               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable!(nodes[1]);
                check_added_monitors!(nodes[1], 1);
                let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
                assert!(updates.update_add_htlcs.is_empty());
@@ -5747,7 +5744,7 @@ mod tests {
 
        #[test]
        fn test_keysend_dup_payment_hash() {
-               return true;
+               return;
 
                // (1): Test that a keysend payment with a duplicate payment hash to an existing pending
                //      outbound regular payment fails as expected.
@@ -5816,10 +5813,8 @@ mod tests {
                nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
                check_added_monitors!(nodes[1], 0);
                commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
-               let events = nodes[1].node.get_and_clear_pending_events();
-               expect_pending_htlcs_forwardable!(nodes[1], events);
-               let events = nodes[1].node.get_and_clear_pending_events();
-               expect_pending_htlcs_forwardable!(nodes[1], events);
+               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable!(nodes[1]);
                check_added_monitors!(nodes[1], 1);
                let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
                assert!(updates.update_add_htlcs.is_empty());
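
For downstream consumers of the renamed field, a hedged sketch of matching on the new `reason` (variant names and payloads are taken from the hunks above; the enum may carry variants this diff never constructs):

    // Hypothetical event-loop arm; not code from this commit.
    match event {
        Event::ChannelClosed { channel_id, reason } => match reason {
            // peer_msg is now a plain String rather than Option<String>.
            ClosureReason::CounterpartyForceClosed { peer_msg } =>
                println!("channel {:?} force-closed by peer: {}", channel_id, peer_msg),
            ClosureReason::ProcessingError { err } =>
                println!("channel {:?} closed after error: {}", channel_id, err),
            // HolderForceClosed, CooperativeClosure, CommitmentTxBroadcasted,
            // DisconnectedPeer, and anything else:
            other => println!("channel {:?} closed: {:?}", channel_id, other),
        },
        _ => {},
    }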
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index ac9f4b396cff3a0da2b9fd6fcba40fad44b80963..a73a532467e78deb24e59e4beaa87f8667b32907 100644
@@ -770,7 +770,9 @@ macro_rules! check_closed_event {
                let events = $node.node.get_and_clear_pending_events();
                assert_eq!(events.len(), $events);
                match events[0] {
-                       Event::ChannelClosed { .. } => {}
+                       Event::ChannelClosed { ref reason, .. } => {
+                               //assert_eq!(reason, $reason);
+                       },
                        _ => panic!("Unexpected event"),
                }
        }}
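
Note the per-reason assertion is left commented out at this stage of the series; as used elsewhere in this diff, the macro is still invoked with just a node and an expected event count:

    check_closed_event!(nodes[1], 1);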
@@ -940,8 +942,7 @@ macro_rules! commitment_signed_dance {
                {
                        commitment_signed_dance!($node_a, $node_b, $commitment_signed, $fail_backwards, true);
                        if $fail_backwards {
-                               let events = $node_a.node.get_and_clear_pending_events();
-                               expect_pending_htlcs_forwardable!($node_a, events);
+                               expect_pending_htlcs_forwardable!($node_a);
                                check_added_monitors!($node_a, 1);
 
                                let channel_state = $node_a.node.channel_state.lock().unwrap();
@@ -983,9 +984,10 @@ macro_rules! get_route_and_payment_hash {
 }
 
 macro_rules! expect_pending_htlcs_forwardable_ignore {
-       ($node: expr, $events: expr) => {{
-               assert_eq!($events.len(), 1);
-               match $events[0] {
+       ($node: expr) => {{
+               let events = $node.node.get_and_clear_pending_events();
+               assert_eq!(events.len(), 1);
+               match events[0] {
                        Event::PendingHTLCsForwardable { .. } => { },
                        _ => panic!("Unexpected event"),
                };
@@ -993,12 +995,26 @@ macro_rules! expect_pending_htlcs_forwardable_ignore {
 }
 
 macro_rules! expect_pending_htlcs_forwardable {
-       ($node: expr, $events: expr) => {{
-               expect_pending_htlcs_forwardable_ignore!($node, $events);
+       ($node: expr) => {{
+               expect_pending_htlcs_forwardable_ignore!($node);
                $node.node.process_pending_htlc_forwards();
        }}
 }
 
+#[cfg(test)]
+macro_rules! expect_pending_htlcs_forwardable_from_events {
+       ($node: expr, $events: expr, $ignore: expr) => {{
+               assert_eq!($events.len(), 1);
+               match $events[0] {
+                       Event::PendingHTLCsForwardable { .. } => { },
+                       _ => panic!("Unexpected event"),
+               };
+               if $ignore {
+                       $node.node.process_pending_htlc_forwards();
+               }
+       }}
+}
+
 #[cfg(any(test, feature = "unstable"))]
 macro_rules! expect_payment_received {
        ($node: expr, $expected_payment_hash: expr, $expected_payment_secret: expr, $expected_recv_value: expr) => {
@@ -1114,8 +1130,7 @@ pub fn pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path
                check_added_monitors!(node, 0);
                commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false);
 
-               let events = node.node.get_and_clear_pending_events();
-               expect_pending_htlcs_forwardable!(node, events);
+               expect_pending_htlcs_forwardable!(node);
 
                if idx == expected_path.len() - 1 {
                        let events_2 = node.node.get_and_clear_pending_events();
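
One subtlety in the new expect_pending_htlcs_forwardable_from_events! helper above: despite the parameter's name, passing `true` for $ignore is what triggers process_pending_htlc_forwards(). A short usage sketch mirroring the call sites later in this diff, for the case where the forwardable event is interleaved with a ChannelClosed event:

    let events = nodes[0].node.get_and_clear_pending_events();
    // Assert on (and process) the first event only; the second is checked by hand.
    expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
    match events[1] {
        Event::ChannelClosed { .. } => {},
        _ => panic!("Unexpected event"),
    }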
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index f44fed6c2b88537a0ecaba66751035ea19c2a012..8c18666bef5322a65965bd99d74d459c8eebdf71 100644
@@ -725,8 +725,7 @@ fn test_update_fee_with_fundee_update_add_htlc() {
        check_added_monitors!(nodes[0], 1);
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
-       let events = nodes[0].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[0], events);
+       expect_pending_htlcs_forwardable!(nodes[0]);
 
        let events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
@@ -1058,10 +1057,8 @@ fn holding_cell_htlc_counting() {
        commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
        // We have to forward pending HTLCs twice - once tries to forward the payment forward (and
        // fails), the second will process the resulting failure and fail the HTLC backward.
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable!(nodes[1]);
        check_added_monitors!(nodes[1], 1);
 
        let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -1102,8 +1099,7 @@ fn holding_cell_htlc_counting() {
        nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_final_raa);
        check_added_monitors!(nodes[2], 1);
 
-       let events = nodes[2].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[2], events);
+       expect_pending_htlcs_forwardable!(nodes[2]);
 
        let events = nodes[2].node.get_and_clear_pending_events();
        assert_eq!(events.len(), payments.len());
@@ -1793,15 +1789,13 @@ fn test_channel_reserve_holding_cell_htlcs() {
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(nodes[1], 1);
 
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
 
        let ref payment_event_11 = expect_forward!(nodes[1]);
        nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_11.msgs[0]);
        commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false);
 
-       let events = nodes[2].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[2], events);
+       expect_pending_htlcs_forwardable!(nodes[2]);
        expect_payment_received!(nodes[2], our_payment_hash_1, our_payment_secret_1, recv_value_1);
 
        // flush the htlcs in the holding cell
@@ -1809,8 +1803,7 @@ fn test_channel_reserve_holding_cell_htlcs() {
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[0]);
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[1]);
        commitment_signed_dance!(nodes[1], nodes[0], &commitment_update_2.commitment_signed, false);
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
 
        let ref payment_event_3 = expect_forward!(nodes[1]);
        assert_eq!(payment_event_3.msgs.len(), 2);
@@ -1818,8 +1811,7 @@ fn test_channel_reserve_holding_cell_htlcs() {
        nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[1]);
 
        commitment_signed_dance!(nodes[2], nodes[1], &payment_event_3.commitment_msg, false);
-       let events = nodes[2].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[2], events);
+       expect_pending_htlcs_forwardable!(nodes[2]);
 
        let events = nodes[2].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 2);
@@ -1971,8 +1963,7 @@ fn channel_reserve_in_flight_removes() {
        check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
        expect_payment_received!(nodes[1], payment_hash_3, payment_secret_3, 100000);
 
        // Note that as this RAA was generated before the delivery of the update_fulfill it shouldn't
@@ -2019,8 +2010,7 @@ fn channel_reserve_in_flight_removes() {
        nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
        check_added_monitors!(nodes[0], 1);
 
-       let events = nodes[0].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[0], events);
+       expect_pending_htlcs_forwardable!(nodes[0]);
        expect_payment_received!(nodes[0], payment_hash_4, payment_secret_4, 10000);
 
        claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_4);
@@ -2452,7 +2442,7 @@ fn claim_htlc_outputs_single_tx() {
                check_added_monitors!(nodes[1], 1);
                check_closed_event!(nodes[1], 1);
                let mut events = nodes[0].node.get_and_clear_pending_events();
-               expect_pending_htlcs_forwardable_ignore!(nodes[0], events[0..1]);
+               expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
                match events[1] {
                        Event::ChannelClosed { .. } => {}
                        _ => panic!("Unexpected event"),
@@ -2743,8 +2733,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
        check_spends!(commitment_tx[0], chan_2.3);
        nodes[2].node.fail_htlc_backwards(&payment_hash);
        check_added_monitors!(nodes[2], 0);
-       let events = nodes[2].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[2], events);
+       expect_pending_htlcs_forwardable!(nodes[2]);
        check_added_monitors!(nodes[2], 1);
 
        let events = nodes[2].node.get_and_clear_pending_msg_events();
@@ -2816,8 +2805,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
                }
        }
 
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
        check_added_monitors!(nodes[1], 1);
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
@@ -2885,8 +2873,7 @@ fn test_simple_commitment_revoked_fail_backward() {
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
 
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
        check_added_monitors!(nodes[1], 1);
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
@@ -2949,8 +2936,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
        let (_, third_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
 
        assert!(nodes[2].node.fail_htlc_backwards(&first_payment_hash));
-       let events = nodes[2].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[2], events);
+       expect_pending_htlcs_forwardable!(nodes[2]);
        check_added_monitors!(nodes[2], 1);
        let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
        assert!(updates.update_add_htlcs.is_empty());
@@ -2963,8 +2949,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
        // Drop the last RAA from 3 -> 2
 
        assert!(nodes[2].node.fail_htlc_backwards(&second_payment_hash));
-       let events = nodes[2].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[2], events);
+       expect_pending_htlcs_forwardable!(nodes[2]);
        check_added_monitors!(nodes[2], 1);
        let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
        assert!(updates.update_add_htlcs.is_empty());
@@ -2981,8 +2966,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
        check_added_monitors!(nodes[2], 1);
 
        assert!(nodes[2].node.fail_htlc_backwards(&third_payment_hash));
-       let events = nodes[2].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[2], events);
+       expect_pending_htlcs_forwardable!(nodes[2]);
        check_added_monitors!(nodes[2], 1);
        let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
        assert!(updates.update_add_htlcs.is_empty());
@@ -3277,8 +3261,7 @@ fn test_force_close_fail_back() {
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
        commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
 
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
 
        let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events_2.len(), 1);
@@ -3928,8 +3911,7 @@ fn test_drop_messages_peer_disconnect_dual_htlc() {
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(nodes[1], 1);
 
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
 
        let events_5 = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events_5.len(), 1);
@@ -3998,8 +3980,7 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) {
                connect_block(&nodes[1], &block);
        }
 
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
 
        check_added_monitors!(nodes[1], 1);
        let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -4059,8 +4040,7 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) {
                let payment_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
                nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
                commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
-               let events = nodes[1].node.get_and_clear_pending_events();
-               expect_pending_htlcs_forwardable!(nodes[1], events);
+               expect_pending_htlcs_forwardable!(nodes[1]);
                check_added_monitors!(nodes[1], 0);
        } else {
                let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
@@ -4075,8 +4055,7 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) {
        connect_blocks(&nodes[1], 1);
 
        if forwarded_htlc {
-               let events = nodes[1].node.get_and_clear_pending_events();
-               expect_pending_htlcs_forwardable!(nodes[1], events);
+               expect_pending_htlcs_forwardable!(nodes[1]);
                check_added_monitors!(nodes[1], 1);
                let fail_commit = nodes[1].node.get_and_clear_pending_msg_events();
                assert_eq!(fail_commit.len(), 1);
@@ -5238,8 +5217,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
 
        mine_transaction(&nodes[1], &htlc_timeout_tx);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
        let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        assert!(htlc_updates.update_add_htlcs.is_empty());
        assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
@@ -5415,8 +5393,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_5));
        assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_6));
        check_added_monitors!(nodes[4], 0);
-       let events = nodes[4].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[4], events);
+       expect_pending_htlcs_forwardable!(nodes[4]);
        check_added_monitors!(nodes[4], 1);
 
        let four_removes = get_htlc_update_msgs!(nodes[4], nodes[3].node.get_our_node_id());
@@ -5430,8 +5407,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        assert!(nodes[5].node.fail_htlc_backwards(&payment_hash_2));
        assert!(nodes[5].node.fail_htlc_backwards(&payment_hash_4));
        check_added_monitors!(nodes[5], 0);
-       let events = nodes[5].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[5], events);
+       expect_pending_htlcs_forwardable!(nodes[5]);
        check_added_monitors!(nodes[5], 1);
 
        let two_removes = get_htlc_update_msgs!(nodes[5], nodes[3].node.get_our_node_id());
@@ -5441,8 +5417,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
 
        let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan.2);
 
-       let events = nodes[3].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[3], events);
+       expect_pending_htlcs_forwardable!(nodes[3]);
        check_added_monitors!(nodes[3], 1);
        let six_removes = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
        nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[0]);
@@ -5481,7 +5456,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
                        }
                        connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
                        check_closed_broadcast!(nodes[2], true);
-                       expect_pending_htlcs_forwardable!(nodes[2], events[0..1]);
+                       expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true);
                } else {
                        assert_eq!(events.len(), 1);
                        match events[0] {
@@ -5490,8 +5465,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
                        }
                        connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
                        check_closed_broadcast!(nodes[2], true);
-                       let events = nodes[2].node.get_and_clear_pending_events();
-                       expect_pending_htlcs_forwardable!(nodes[2], events);
+                       expect_pending_htlcs_forwardable!(nodes[2]);
                }
        } else {
                mine_transaction(&nodes[2], &ds_prev_commitment_tx[0]);
@@ -5504,7 +5478,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
                        }
                        connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
                        check_closed_broadcast!(nodes[2], true);
-                       expect_pending_htlcs_forwardable!(nodes[2], events[0..1]);
+                       expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true);
                } else {
                        assert_eq!(events.len(), 1);
                        match events[0] {
@@ -5513,8 +5487,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
                        }
                        connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
                        check_closed_broadcast!(nodes[2], true);
-                       let events = nodes[2].node.get_and_clear_pending_events();
-                       expect_pending_htlcs_forwardable!(nodes[2], events);
+                       expect_pending_htlcs_forwardable!(nodes[2]);
                }
        }
        check_added_monitors!(nodes[2], 3);
@@ -5889,8 +5862,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no
        let htlc_value = if use_dust { 50000 } else { 3000000 };
        let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
        assert!(nodes[1].node.fail_htlc_backwards(&our_payment_hash));
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
        check_added_monitors!(nodes[1], 1);
 
        let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -6320,8 +6292,7 @@ fn test_fail_holding_cell_htlc_upon_free_multihop() {
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
        check_added_monitors!(nodes[1], 0);
        commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
 
        chan_stat = get_channel_value_stat!(nodes[1], chan_1_2.2);
        assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send);
@@ -6520,8 +6491,7 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment()
                check_added_monitors!(nodes[1], 0);
                commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
 
-               let events = nodes[1].node.get_and_clear_pending_events();
-               expect_pending_htlcs_forwardable!(nodes[1], events);
+               expect_pending_htlcs_forwardable!(nodes[1]);
                expect_payment_received!(nodes[1], our_payment_hash, our_payment_secret, 100000);
        }
        let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
@@ -7043,8 +7013,7 @@ fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_upda
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
        check_added_monitors!(nodes[1], 0);
        commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
        let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events_2.len(), 1);
        check_added_monitors!(nodes[1], 1);
@@ -7077,8 +7046,7 @@ fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_upda
 
        check_added_monitors!(nodes[1], 0);
        commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true);
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
        let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events_4.len(), 1);
 
@@ -7122,8 +7090,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
        // Fail one HTLC to prune it in the will-be-latest-local commitment tx
        assert!(nodes[1].node.fail_htlc_backwards(&payment_hash_2));
        check_added_monitors!(nodes[1], 0);
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
        check_added_monitors!(nodes[1], 1);
 
        let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -7491,10 +7458,8 @@ fn test_check_htlc_underpaying() {
 
        // Note that we first have to wait a random delay before processing the receipt of the HTLC,
        // and then will wait a second random delay before failing the HTLC back:
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable!(nodes[1]);
 
        // Node 3 is expecting payment of 100_000 but received 10_000,
        // it should fail htlc like we didn't know the preimage.
@@ -7926,7 +7891,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
        let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_11.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[2].clone()] });
        let events = nodes[0].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable_ignore!(nodes[0], events[0..1]);
+       expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
        match events[1] {
                Event::ChannelClosed { .. } => {}
                _ => panic!("Unexpected event"),
@@ -8203,8 +8168,7 @@ fn test_bump_txn_sanitize_tracking_maps() {
 
        // Broadcast set of revoked txn on A
        connect_blocks(&nodes[0], TEST_FINAL_CLTV + 2 - CHAN_CONFIRM_DEPTH);
-       let events = nodes[0].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable_ignore!(nodes[0], events);
+       expect_pending_htlcs_forwardable_ignore!(nodes[0]);
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
 
        mine_transaction(&nodes[0], &revoked_local_txn[0]);
@@ -8324,8 +8288,7 @@ fn test_preimage_storage() {
        }
        // Note that after leaving the above scope we have no knowledge of any arguments or return
        // values from previous calls.
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
        let events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match events[0] {
@@ -8396,8 +8359,7 @@ fn test_secret_timeout() {
        }
        // Note that after leaving the above scope we have no knowledge of any arguments or return
        // values from previous calls.
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
        let events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match events[0] {
@@ -8441,10 +8403,8 @@ fn test_bad_secret_hash() {
 
                        // We have to forward pending HTLCs once to process the receipt of the HTLC and then
                        // again to process the pending backwards-failure of the HTLC
-                       let events = nodes[1].node.get_and_clear_pending_events();
-                       expect_pending_htlcs_forwardable!(nodes[1], events);
-                       let events = nodes[1].node.get_and_clear_pending_events();
-                       expect_pending_htlcs_forwardable!(nodes[1], events);
+                       expect_pending_htlcs_forwardable!(nodes[1]);
+                       expect_pending_htlcs_forwardable!(nodes[1]);
                        check_added_monitors!(nodes[1], 1);
 
                        // We should fail the payment back
@@ -9257,8 +9217,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
                // additional block built on top of the current chain.
                nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
                        &nodes[1].get_block_header(conf_height + 1), &[(0, &spending_txn[1])], conf_height + 1);
-               let events = nodes[1].node.get_and_clear_pending_events();
-               expect_pending_htlcs_forwardable!(nodes[1], events);
+               expect_pending_htlcs_forwardable!(nodes[1]);
                check_added_monitors!(nodes[1], 1);
 
                let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
index 9c1695148cd6622f2ac12da9444d85d3efec1031..b05312013d19544f9017cbcd870e1a5d4ac5acf6 100644 (file)
@@ -65,8 +65,7 @@ fn chanmon_fail_from_stale_commitment() {
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
        commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false);
 
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
        get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
        check_added_monitors!(nodes[1], 1);
 
@@ -79,8 +78,7 @@ fn chanmon_fail_from_stale_commitment() {
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
-       let events = nodes[1].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable!(nodes[1], events);
+       expect_pending_htlcs_forwardable!(nodes[1]);
        check_added_monitors!(nodes[1], 1);
        let fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 
index 814782e59e4836f18e96197e86145484fa39014e..70d5a0c0bdfe30362668824ed6e77e0e267755a0 100644 (file)
@@ -122,8 +122,7 @@ fn run_onion_failure_test_with_fail_intercept<F1,F2,F3>(_name: &str, test_case:
                                expect_htlc_forward!(&nodes[2]);
                                expect_event!(&nodes[2], Event::PaymentReceived);
                                callback_node();
-                               let events = nodes[2].node.get_and_clear_pending_events();
-                               expect_pending_htlcs_forwardable!(nodes[2], events);
+                               expect_pending_htlcs_forwardable!(nodes[2]);
                        }
 
                        let update_2_1 = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
index 98b942e9c7109a3cfbce9c135f186259a315c588..8178f9bfde40b44fea3e564a3f48fd78906b0f18 100644 (file)
@@ -149,8 +149,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
                        txdata: vec![],
                };
                connect_block(&nodes[1], &block);
-               let events = nodes[1].node.get_and_clear_pending_events();
-               expect_pending_htlcs_forwardable!(nodes[1], events);
+               expect_pending_htlcs_forwardable!(nodes[1]);
        }
 
        check_added_monitors!(nodes[1], 1);
index 2e54e2f3f15b7a6f0162dcd3ac0492a466024b27..ca0427a3e6121ce329f31a68a415785840cd1cbe 100644 (file)
@@ -70,30 +70,39 @@ pub enum PaymentPurpose {
 }
 
 #[derive(Clone, Debug)]
-/// Some information provided on the closure source of the channel halting.
+/// The reason the channel was closed. See individual variants for more details.
 pub enum ClosureReason {
-       /// Closure generated from receiving a peer error message by ChannelManager::handle_error
+       /// Closure generated from receiving a peer error message.
        CounterpartyForceClosed {
-               /// The error is coming from the peer, there *might* be a human-readable msg
-               peer_msg: Option<String>,
+               /// The error which the peer sent us.
+               ///
+               /// The string should be sanitized before it is used (e.g. emitted to logs
+               /// or printed to stdout). Otherwise, a well-crafted error message may exploit
+               /// a security vulnerability in the terminal emulator or the logging subsystem.
+               peer_msg: String,
        },
-       /// Closure generated from ChannelManager::force_close_channel
+       /// Closure generated from [`ChannelManager::force_close_channel`], called by the user.
        HolderForceClosed,
-       /// Closure generated from receiving a peer's ClosingSigned message. Note the shutdown
-       /// sequence might have been initially initiated by us.
+       /// The channel was closed after negotiating a cooperative close and we've now broadcasted
+       /// the cooperative close transaction. Note the shutdown may have been initiated by us.
+       //TODO: split between CounterpartyInitiated/LocallyInitiated
        CooperativeClosure,
-       /// Closure generated from receiving chain::Watch's CommitmentTxBroadcast event.
+       /// A commitment transaction was confirmed on chain, closing the channel. Most likely this
+       /// commitment transaction came from our counterparty, but it may also have come from
+       /// a copy of our own `ChannelMonitor`.
        CommitmentTxBroadcasted,
        /// Closure generated from processing an event, likely an HTLC forward/relay/reception.
        ProcessingError {
                err: String,
        },
-       /// Closure generated from ChannelManager::peer_disconnected.
+       /// The `PeerManager` informed us that we've disconnected from the peer and that it is
+       /// unlikely we'll be able to reconnect, most likely because our features are incompatible:
+       /// we, or the peer, require features which the other side does not support.
        DisconnectedPeer,
 }
 
 impl_writeable_tlv_based_enum_upgradable!(ClosureReason,
-       (0, CounterpartyForceClosed) => { (1, peer_msg, option) },
+       (0, CounterpartyForceClosed) => { (1, peer_msg, required) },
        (2, HolderForceClosed) => {},
        (6, CommitmentTxBroadcasted) => {},
        (4, CooperativeClosure) => {},
@@ -224,11 +233,11 @@ pub enum Event {
        },
        /// Used to indicate that a channel with the given `channel_id` is in the process of closure.
        ChannelClosed  {
-               /// The channel_id which has been barren from further off-chain updates but
-               /// funding output might still be not resolved yet.
+               /// The channel_id of the channel which has been closed. Note that on-chain transactions
+               /// resolving the channel are likely still awaiting confirmation.
                channel_id: [u8; 32],
-               /// A machine-readable error message
-               err: ClosureReason
+               /// The reason the channel was closed.
+               reason: ClosureReason
        }
 }
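
The sanitization warning on peer_msg is the consumer's job, not the library's. A minimal sketch of handling the reworked event under the new field names (the handle_event function and the control-character filter are illustrative choices, not part of the API):

	use lightning::util::events::{ClosureReason, Event};

	fn handle_event(event: &Event) {
		if let Event::ChannelClosed { channel_id, reason } = event {
			match reason {
				ClosureReason::CounterpartyForceClosed { peer_msg } => {
					// Strip control characters so a hostile peer message cannot
					// smuggle terminal escape sequences into our logs.
					let sanitized: String =
						peer_msg.chars().filter(|c| !c.is_control()).collect();
					println!("channel {:?} force-closed by peer: {}", channel_id, sanitized);
				},
				other => println!("channel {:?} closed: {:?}", channel_id, other),
			}
		}
	}
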
 
@@ -306,10 +315,10 @@ impl Writeable for Event {
                                        (2, claim_from_onchain_tx, required),
                                });
                        },
-                       &Event::ChannelClosed { ref channel_id, ref err } => {
-                               8u8.write(writer)?;
+                       &Event::ChannelClosed { ref channel_id, ref reason } => {
+                               9u8.write(writer)?;
                                channel_id.write(writer)?;
-                               err.write(writer)?;
+                               reason.write(writer)?;
                                write_tlv_fields!(writer, {});
                        },
                }
@@ -425,12 +434,12 @@ impl MaybeReadable for Event {
                                };
                                f()
                        },
-                       8u8 => {
+                       9u8 => {
                                let channel_id = Readable::read(reader)?;
-                               let err = MaybeReadable::read(reader)?;
+                               let reason = MaybeReadable::read(reader)?;
                                read_tlv_fields!(reader, {});
-                               if err.is_none() { return Ok(None); }
-                               Ok(Some(Event::ChannelClosed { channel_id, err: err.unwrap() }))
+                               if reason.is_none() { return Ok(None); }
+                               Ok(Some(Event::ChannelClosed { channel_id, reason: reason.unwrap() }))
                        },
                        // Versions prior to 0.0.100 did not ignore odd types, instead returning InvalidValue.
                        x if x % 2 == 1 => Ok(None),
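
Bumping the event type from 8 to 9 also moves ChannelClosed from an even to an odd number, so deserializers from 0.0.100 onwards that predate the event will skip it via the arm above rather than fail. A minimal sketch of that rule, with a hypothetical helper name:

	use lightning::ln::msgs::DecodeError;

	// Unknown odd event types are optional and silently skipped; unknown even
	// types are required-to-understand and therefore a hard error.
	fn handle_unknown_event_type(type_byte: u8) -> Result<Option<()>, DecodeError> {
		if type_byte % 2 == 1 {
			Ok(None)
		} else {
			Err(DecodeError::InvalidValue)
		}
	}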