Merge pull request #970 from TheBlueMatt/2021-06-no-confirmed-csv-delay
author: Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Fri, 2 Jul 2021 17:55:17 +0000 (17:55 +0000)
committer: GitHub <noreply@github.com>
Fri, 2 Jul 2021 17:55:17 +0000 (17:55 +0000)
Create SpendableOutputs events no matter the chain::Confirm order

CHANGELOG.md
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/peer_handler.rs
lightning/src/util/events.rs
lightning/src/util/test_utils.rs

index 332a8e4ba00799695c340a4295029fe5d94606c1..2931466aed9d5be571b1fab9440a5ab9aa4fce47 100644 (file)
@@ -1,3 +1,13 @@
+# 0.0.99 - WIP
+
+## Serialization Compatibility
+
+ * Due to a bug discovered in 0.0.98, if a `ChannelManager` is serialized on
+   version 0.0.98 while an `Event::PaymentSent` is pending processing, the
+   `ChannelManager` will fail to deserialize on both version 0.0.98 and later
+   versions. If you have such a `ChannelManager` available, a simple patch will
+   allow it to deserialize; please file an issue if you need assistance.
+
 # 0.0.98 - 2021-06-11
 
 0.0.98 should be considered a release candidate to the first alpha release of
index fdcf7c2de70ee71edb5bd8e3d2ff4dfdafa67606..dd7b1906ca079aced33d3a62384fc1772d02b49c 100644 (file)
@@ -905,8 +905,8 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(nodes[1], 1);
 
-       // Attempt to forward a third payment but fail due to the second channel being unavailable
-       // for forwarding.
+       // Forward a third payment which will also be added to the holding cell, despite the channel
+       // being paused while waiting on a monitor update.
        let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[2]);
        {
                let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
@@ -921,39 +921,11 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
        commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
        check_added_monitors!(nodes[1], 0);
 
-       let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
-       assert_eq!(events_2.len(), 1);
-       match events_2.remove(0) {
-               MessageSendEvent::UpdateHTLCs { node_id, updates } => {
-                       assert_eq!(node_id, nodes[0].node.get_our_node_id());
-                       assert!(updates.update_fulfill_htlcs.is_empty());
-                       assert_eq!(updates.update_fail_htlcs.len(), 1);
-                       assert!(updates.update_fail_malformed_htlcs.is_empty());
-                       assert!(updates.update_add_htlcs.is_empty());
-                       assert!(updates.update_fee.is_none());
-
-                       nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
-                       commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true);
-
-                       let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
-                       assert_eq!(msg_events.len(), 1);
-                       match msg_events[0] {
-                               MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
-                                       assert_eq!(msg.contents.short_channel_id, chan_2.0.contents.short_channel_id);
-                                       assert_eq!(msg.contents.flags & 2, 2); // temp disabled
-                               },
-                               _ => panic!("Unexpected event"),
-                       }
-
-                       let events = nodes[0].node.get_and_clear_pending_events();
-                       assert_eq!(events.len(), 1);
-                       if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
-                               assert_eq!(payment_hash, payment_hash_3);
-                               assert!(!rejected_by_dest);
-                       } else { panic!("Unexpected event!"); }
-               },
-               _ => panic!("Unexpected event type!"),
-       };
+       // Call process_pending_htlc_forwards (via expect_pending_htlcs_forwardable) and check that
+       // the new HTLC was simply added to the holding cell and not forwarded.
+       expect_pending_htlcs_forwardable!(nodes[1]);
+       check_added_monitors!(nodes[1], 0);
+       assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 
        let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs {
                // Try to route another payment backwards from 2 to make sure 1 holds off on responding
@@ -970,7 +942,6 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
                assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
                nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
                assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-               assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
                (Some(payment_preimage_4), Some(payment_hash_4))
        } else { (None, None) };
 
@@ -1020,14 +991,10 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 
        nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &messages_a.0);
        commitment_signed_dance!(nodes[0], nodes[1], messages_a.1, false);
-       let events_4 = nodes[0].node.get_and_clear_pending_events();
-       assert_eq!(events_4.len(), 1);
-       if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events_4[0] {
-               assert_eq!(payment_hash, payment_hash_1);
-               assert!(rejected_by_dest);
-       } else { panic!("Unexpected event!"); }
+       expect_payment_failed!(nodes[0], payment_hash_1, true);
 
        nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]);
+       let as_cs;
        if test_ignore_second_cs {
                nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg);
                check_added_monitors!(nodes[2], 1);
@@ -1043,40 +1010,83 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 
                nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
                check_added_monitors!(nodes[1], 1);
-               let as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
-               assert!(as_cs.update_add_htlcs.is_empty());
-               assert!(as_cs.update_fail_htlcs.is_empty());
-               assert!(as_cs.update_fail_malformed_htlcs.is_empty());
-               assert!(as_cs.update_fulfill_htlcs.is_empty());
-               assert!(as_cs.update_fee.is_none());
+               as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
 
                nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed);
                check_added_monitors!(nodes[1], 1);
-               let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
-
-               nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed);
+       } else {
+               nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg);
                check_added_monitors!(nodes[2], 1);
-               let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
 
-               nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
-               check_added_monitors!(nodes[2], 1);
-               assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
+               let bs_revoke_and_commit = nodes[2].node.get_and_clear_pending_msg_events();
+               assert_eq!(bs_revoke_and_commit.len(), 2);
+               match bs_revoke_and_commit[0] {
+                       MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
+                               assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+                               nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &msg);
+                               check_added_monitors!(nodes[1], 1);
+                       },
+                       _ => panic!("Unexpected event"),
+               }
 
-               nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa);
-               check_added_monitors!(nodes[1], 1);
-               assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-       } else {
-               commitment_signed_dance!(nodes[2], nodes[1], send_event_b.commitment_msg, false);
+               as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+
+               match bs_revoke_and_commit[1] {
+                       MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
+                               assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+                               assert!(updates.update_add_htlcs.is_empty());
+                               assert!(updates.update_fail_htlcs.is_empty());
+                               assert!(updates.update_fail_malformed_htlcs.is_empty());
+                               assert!(updates.update_fulfill_htlcs.is_empty());
+                               assert!(updates.update_fee.is_none());
+                               nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
+                               check_added_monitors!(nodes[1], 1);
+                       },
+                       _ => panic!("Unexpected event"),
+               }
        }
 
+       assert_eq!(as_cs.update_add_htlcs.len(), 1);
+       assert!(as_cs.update_fail_htlcs.is_empty());
+       assert!(as_cs.update_fail_malformed_htlcs.is_empty());
+       assert!(as_cs.update_fulfill_htlcs.is_empty());
+       assert!(as_cs.update_fee.is_none());
+       let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
+
+       nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &as_cs.update_add_htlcs[0]);
+       nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed);
+       check_added_monitors!(nodes[2], 1);
+       let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+       nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
+       check_added_monitors!(nodes[2], 1);
+       let bs_second_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+
+       nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa);
+       check_added_monitors!(nodes[1], 1);
+       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+       nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_second_cs.commitment_signed);
+       check_added_monitors!(nodes[1], 1);
+       let as_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
+
+       nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_second_raa);
+       check_added_monitors!(nodes[2], 1);
+       assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
+
        expect_pending_htlcs_forwardable!(nodes[2]);
 
        let events_6 = nodes[2].node.get_and_clear_pending_events();
-       assert_eq!(events_6.len(), 1);
+       assert_eq!(events_6.len(), 2);
        match events_6[0] {
                Event::PaymentReceived { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_2); },
                _ => panic!("Unexpected event"),
        };
+       match events_6[1] {
+               Event::PaymentReceived { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_3); },
+               _ => panic!("Unexpected event"),
+       };
 
        if test_ignore_second_cs {
                expect_pending_htlcs_forwardable!(nodes[1]);
@@ -1611,9 +1621,9 @@ fn first_message_on_recv_ordering() {
 fn test_monitor_update_fail_claim() {
        // Basic test for monitor update failures when processing claim_funds calls.
        // We set up a simple 3-node network, sending a payment from A to B and failing B's monitor
-       // update to claim the payment. We then send a payment C->B->A, making the forward of this
-       // payment from B to A fail due to the paused channel. Finally, we restore the channel monitor
-       // updating and claim the payment on B.
+       // update to claim the payment. We then send two payments C->B->A, which are held at B.
+       // Finally, we restore the channel monitor updating and claim the payment on B, forwarding
+       // the payments from C onwards to A.
        let chanmon_cfgs = create_chanmon_cfgs(3);
        let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
@@ -1629,12 +1639,19 @@ fn test_monitor_update_fail_claim() {
 
        *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
        assert!(nodes[1].node.claim_funds(payment_preimage_1));
+       nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
        check_added_monitors!(nodes[1], 1);
 
+       // Note that at this point there is a pending commitment transaction update for A being held by
+       // B. Even when we go to send the payment from C through B to A, B will not update this
+       // already-signed commitment transaction and will instead wait for it to resolve before
+       // forwarding the payment onwards.
+
        let (_, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[0]);
+       let route;
        {
                let net_graph_msg_handler = &nodes[2].net_graph_msg_handler;
-               let route = get_route(&nodes[2].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
+               route = get_route(&nodes[2].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1_000_000, TEST_FINAL_CLTV, &logger).unwrap();
                nodes[2].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
                check_added_monitors!(nodes[2], 1);
        }
@@ -1649,29 +1666,19 @@ fn test_monitor_update_fail_claim() {
        nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 0);
-       nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
        commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
 
-       let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
-       nodes[2].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]);
-       commitment_signed_dance!(nodes[2], nodes[1], bs_fail_update.commitment_signed, false, true);
-
-       let msg_events = nodes[2].node.get_and_clear_pending_msg_events();
-       assert_eq!(msg_events.len(), 1);
-       match msg_events[0] {
-               MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
-                       assert_eq!(msg.contents.short_channel_id, chan_1.0.contents.short_channel_id);
-                       assert_eq!(msg.contents.flags & 2, 2); // temp disabled
-               },
-               _ => panic!("Unexpected event"),
-       }
+       let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[0]);
+       nodes[2].node.send_payment(&route, payment_hash_3, &Some(payment_secret_3)).unwrap();
+       check_added_monitors!(nodes[2], 1);
 
-       let events = nodes[2].node.get_and_clear_pending_events();
+       let mut events = nodes[2].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
-       if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
-               assert_eq!(payment_hash, payment_hash_2);
-               assert!(!rejected_by_dest);
-       } else { panic!("Unexpected event!"); }
+       let payment_event = SendEvent::from_event(events.pop().unwrap());
+       nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
+       let events = nodes[1].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 0);
+       commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
 
        // Now restore monitor updating on the 0<->1 channel and claim the funds on B.
        let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
@@ -1681,12 +1688,37 @@ fn test_monitor_update_fail_claim() {
        let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]);
        commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false);
+       expect_payment_sent!(nodes[0], payment_preimage_1);
+
+       // Process the pending HTLC forwards; note that both payments were batched into one commitment update.
+       expect_pending_htlcs_forwardable!(nodes[1]);
+       check_added_monitors!(nodes[1], 1);
+       let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[0]);
+       nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[1]);
+       commitment_signed_dance!(nodes[0], nodes[1], bs_forward_update.commitment_signed, false);
+       expect_pending_htlcs_forwardable!(nodes[0]);
 
        let events = nodes[0].node.get_and_clear_pending_events();
-       assert_eq!(events.len(), 1);
-       if let Event::PaymentSent { payment_preimage, .. } = events[0] {
-               assert_eq!(payment_preimage, payment_preimage_1);
-       } else { panic!("Unexpected event!"); }
+       assert_eq!(events.len(), 2);
+       match events[0] {
+               Event::PaymentReceived { ref payment_hash, ref payment_preimage, ref payment_secret, amt, user_payment_id: _ } => {
+                       assert_eq!(payment_hash_2, *payment_hash);
+                       assert!(payment_preimage.is_none());
+                       assert_eq!(payment_secret_2, *payment_secret);
+                       assert_eq!(1_000_000, amt);
+               },
+               _ => panic!("Unexpected event"),
+       }
+       match events[1] {
+               Event::PaymentReceived { ref payment_hash, ref payment_preimage, ref payment_secret, amt, user_payment_id: _ } => {
+                       assert_eq!(payment_hash_3, *payment_hash);
+                       assert!(payment_preimage.is_none());
+                       assert_eq!(payment_secret_3, *payment_secret);
+                       assert_eq!(1_000_000, amt);
+               },
+               _ => panic!("Unexpected event"),
+       }
 }
 
 #[test]
index 403744c4f6ef38208d4e87f814d175e814155f5e..4051ebf3fbf6cc0d07dd29c4132c2dab3510bb43 100644 (file)
@@ -288,7 +288,7 @@ impl HTLCCandidate {
 }
 
 /// Information needed for constructing an invoice route hint for this channel.
-#[derive(Clone)]
+#[derive(Clone, Debug, PartialEq)]
 pub struct CounterpartyForwardingInfo {
        /// Base routing fee in millisatoshis.
        pub fee_base_msat: u32,
@@ -3513,7 +3513,7 @@ impl<Signer: Sign> Channel<Signer> {
        /// is_usable() and considers things like the channel being temporarily disabled.
        /// Allowed in any state (including after shutdown)
        pub fn is_live(&self) -> bool {
-               self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32) == 0)
+               self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0)
        }
 
        /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
@@ -4030,10 +4030,18 @@ impl<Signer: Sign> Channel<Signer> {
 
        /// Adds a pending outbound HTLC to this channel; note that you probably want
        /// send_htlc_and_commit instead, because you'll want both messages at once.
-       /// This returns an option instead of a pure UpdateAddHTLC as we may be in a state where we are
-       /// waiting on the remote peer to send us a revoke_and_ack during which time we cannot add new
-       /// HTLCs on the wire or we wouldn't be able to determine what they actually ACK'ed.
-       /// You MUST call send_commitment prior to any other calls on this Channel
+       ///
+       /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
+       /// the wire:
+       /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
+       ///   wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
+       ///   awaiting ACK.
+       /// * In cases where we're marked MonitorUpdateFailed, we cannot commit to a new state as we
+       ///   may not yet have sent the previous commitment update messages and will need to regenerate
+       ///   them.
+       ///
+       /// You MUST call send_commitment prior to calling any other methods on this Channel!
+       ///
        /// If an Err is returned, it's a ChannelError::Ignore!
        pub fn send_htlc(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError> {
                if (self.channel_state & (ChannelState::ChannelFunded as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelFunded as u32) {
@@ -4052,14 +4060,14 @@ impl<Signer: Sign> Channel<Signer> {
                        return Err(ChannelError::Ignore(format!("Cannot send less than their minimum HTLC value ({})", self.counterparty_htlc_minimum_msat)));
                }
 
-               if (self.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32)) != 0 {
+               if (self.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
                        // Note that this should never really happen: receiving an incoming HTLC for relay
                        // while !is_live() will result in us rejecting the HTLC, and we won't allow the
                        // user to send directly into a !is_live() channel. However, if we
                        // disconnected during the time the previous hop was doing the commitment dance we may
                        // end up getting here after the forwarding delay. In any case, returning an
                        // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
-                       return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected/frozen for channel monitor update".to_owned()));
+                       return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
                }
 
                let (outbound_htlc_count, htlc_outbound_value_msat) = self.get_outbound_pending_htlc_stats();
@@ -4104,7 +4112,7 @@ impl<Signer: Sign> Channel<Signer> {
                }
 
                // Now update local state:
-               if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == (ChannelState::AwaitingRemoteRevoke as u32) {
+               if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateFailed as u32)) != 0 {
                        self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
                                amount_msat,
                                payment_hash,
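
The channel.rs hunks above change the policy for HTLCs sent while a monitor update is pending: rather than rejecting them outright (the old `send_htlc` check), the channel now queues them in the holding cell, the same branch already used while awaiting a revoke_and_ack. A minimal, self-contained sketch of that decision, using hypothetical flag constants and simplified types rather than rust-lightning's actual `ChannelState` machinery:

```rust
// Hypothetical flag constants standing in for ChannelState bits.
const PEER_DISCONNECTED: u32 = 1 << 0;
const AWAITING_REMOTE_REVOKE: u32 = 1 << 1;
const MONITOR_UPDATE_FAILED: u32 = 1 << 2;

struct UpdateAddHtlc { amount_msat: u64 }

enum SendResult {
    // The HTLC can go out on the wire immediately.
    Send(UpdateAddHtlc),
    // The HTLC was queued in the holding cell until the channel unblocks.
    HeldInCell,
    // The HTLC cannot be accepted right now at all.
    Rejected(&'static str),
}

fn send_htlc(state: u32, holding_cell: &mut Vec<UpdateAddHtlc>, amount_msat: u64) -> SendResult {
    // Disconnected peers are still rejected outright: the forwarding delay may
    // have raced the disconnect, and an Ignore-style error lets the caller
    // fail the HTLC backwards cleanly.
    if state & PEER_DISCONNECTED != 0 {
        return SendResult::Rejected("Cannot send an HTLC while disconnected from channel counterparty");
    }
    // Awaiting a revoke_and_ack *or* blocked on a failed monitor update: queue
    // the HTLC in the holding cell rather than committing to a new state that
    // might need to be regenerated.
    if state & (AWAITING_REMOTE_REVOKE | MONITOR_UPDATE_FAILED) != 0 {
        holding_cell.push(UpdateAddHtlc { amount_msat });
        return SendResult::HeldInCell;
    }
    SendResult::Send(UpdateAddHtlc { amount_msat })
}

fn main() {
    let mut cell = Vec::new();
    assert!(matches!(send_htlc(MONITOR_UPDATE_FAILED, &mut cell, 1_000), SendResult::HeldInCell));
    assert!(matches!(send_htlc(0, &mut cell, 2_000), SendResult::Send(_)));
    assert_eq!(cell.len(), 1);
}
```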
index 766b2394b7ad38e3566f56b72906f4efe96800b1..1f16781603ab43e5dc6f6ed0ae8a65b567a1c2d3 100644 (file)
@@ -632,7 +632,7 @@ const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRA
 const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
 
 /// Details of a channel, as returned by ChannelManager::list_channels and ChannelManager::list_usable_channels
-#[derive(Clone)]
+#[derive(Clone, Debug, PartialEq)]
 pub struct ChannelDetails {
        /// The channel's ID (prior to funding transaction generation, this is a random 32 bytes,
        /// thereafter this is the txid of the funding transaction xor the funding transaction output).
@@ -677,8 +677,7 @@ pub struct ChannelDetails {
        /// point after the funding transaction received enough confirmations).
        pub is_funding_locked: bool,
        /// True if the channel is (a) confirmed and funding_locked messages have been exchanged, (b)
-       /// the peer is connected, (c) no monitor update failure is pending resolution, and (d) the
-       /// channel is not currently negotiating a shutdown.
+       /// the peer is connected, and (c) the channel is not currently negotiating a shutdown.
        ///
        /// This is a strict superset of `is_funding_locked`.
        pub is_usable: bool,
@@ -3346,14 +3345,15 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                Ok(())
        }
 
-       fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<(), MsgHandleErrInternal> {
+       /// Returns DoPersist if anything changed, otherwise either SkipPersist or an Err.
+       fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<NotifyOption, MsgHandleErrInternal> {
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
                let chan_id = match channel_state.short_to_id.get(&msg.contents.short_channel_id) {
                        Some(chan_id) => chan_id.clone(),
                        None => {
                                // It's not a local channel
-                               return Ok(())
+                               return Ok(NotifyOption::SkipPersist)
                        }
                };
                match channel_state.by_id.entry(chan_id) {
@@ -3363,7 +3363,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                // If the announcement is about a channel of ours which is public, some
                                                // other peer may simply be forwarding all its gossip to us. Don't provide
                                                // a scary-looking error message and return Ok instead.
-                                               return Ok(());
+                                               return Ok(NotifyOption::SkipPersist);
                                        }
                                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
                                }
@@ -3371,7 +3371,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        },
                        hash_map::Entry::Vacant(_) => unreachable!()
                }
-               Ok(())
+               Ok(NotifyOption::DoPersist)
        }
 
        fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
@@ -4116,8 +4116,13 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
        }
 
        fn handle_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-               let _ = handle_error!(self, self.internal_channel_update(counterparty_node_id, msg), *counterparty_node_id);
+               PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+                       if let Ok(persist) = handle_error!(self, self.internal_channel_update(counterparty_node_id, msg), *counterparty_node_id) {
+                               persist
+                       } else {
+                               NotifyOption::SkipPersist
+                       }
+               });
        }
 
        fn handle_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
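
`handle_channel_update` now routes through `PersistenceNotifierGuard::optionally_notify`, waking persistence waiters only when `internal_channel_update` returns `NotifyOption::DoPersist`. A compilable sketch of that notify-on-result idea under simplified assumptions (a bare `AtomicBool` in place of rust-lightning's `PersistenceNotifier`, and a free function instead of a lock-holding RAII guard):

```rust
use std::sync::atomic::{AtomicBool, Ordering};

// Mirrors the NotifyOption enum used by internal_channel_update above.
enum NotifyOption { DoPersist, SkipPersist }

struct Notifier { needs_persist: AtomicBool }

// Run a handler and only mark the manager dirty if it reports a real change.
// rust-lightning wraps this in a guard holding the total_consistency_lock;
// this sketch keeps just the notify-on-result behavior.
fn optionally_notify<F: FnOnce() -> NotifyOption>(notifier: &Notifier, handler: F) {
    if let NotifyOption::DoPersist = handler() {
        notifier.needs_persist.store(true, Ordering::Release);
    }
}

fn main() {
    let notifier = Notifier { needs_persist: AtomicBool::new(false) };
    // A channel_update for someone else's channel changes nothing: skip.
    optionally_notify(&notifier, || NotifyOption::SkipPersist);
    assert!(!notifier.needs_persist.load(Ordering::Acquire));
    // An update to one of our channels requires re-persisting the manager.
    optionally_notify(&notifier, || NotifyOption::DoPersist);
    assert!(notifier.needs_persist.load(Ordering::Acquire));
}
```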
@@ -4662,6 +4667,8 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                                log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
                                        log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
                                        log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
+                                       log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
+                                       log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/rust-bitcoin/rust-lightning");
                                        return Err(DecodeError::InvalidValue);
                                } else if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
                                                channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() ||
@@ -4681,6 +4688,8 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.channel_id()));
                                log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
                                log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
+                               log_error!(args.logger, " Without the ChannelMonitor we cannot continue without risking funds.");
+                               log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/rust-bitcoin/rust-lightning");
                                return Err(DecodeError::InvalidValue);
                        }
                }
@@ -4823,6 +4832,9 @@ mod tests {
        use core::sync::atomic::{AtomicBool, Ordering};
        use std::thread;
        use core::time::Duration;
+       use ln::functional_test_utils::*;
+       use ln::features::InitFeatures;
+       use ln::msgs::ChannelMessageHandler;
 
        #[test]
        fn test_wait_timeout() {
@@ -4865,6 +4877,53 @@ mod tests {
                        }
                }
        }
+
+       #[test]
+       fn test_notify_limits() {
+               // Check that a few cases which don't require the persistence of a new ChannelManager
+               // indeed do not cause the persistence of a new ChannelManager.
+               let chanmon_cfgs = create_chanmon_cfgs(3);
+               let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+               let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+               let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+
+               // We check that the channel info the nodes have doesn't change too early, even though
+               // we try to deliver messages with new values.
+               chan.0.contents.fee_base_msat *= 2;
+               chan.1.contents.fee_base_msat *= 2;
+               let node_a_chan_info = nodes[0].node.list_channels()[0].clone();
+               let node_b_chan_info = nodes[1].node.list_channels()[0].clone();
+
+               // The first two nodes (which opened a channel) should now require fresh persistence
+               assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               // ... but the last node should not.
+               assert!(!nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               // After persisting the first two nodes they should no longer need fresh persistence.
+               assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+
+               // Node 3, unrelated to the only channel, shouldn't care if it receives a channel_update
+               // about the channel.
+               nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.0);
+               nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.1);
+               assert!(!nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
+
+               // The nodes which are a party to the channel should also ignore messages from unrelated
+               // parties.
+               nodes[0].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.0);
+               nodes[0].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
+               nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.0);
+               nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
+               assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+               assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+
+               // At this point the channel info given by peers should still be the same.
+               assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
+               assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
+       }
 }
 
 #[cfg(all(any(test, feature = "_test_utils"), feature = "unstable"))]
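
`test_notify_limits` exercises the waiter side of this machinery; a background persister would poll the same API in a loop. A hedged sketch of that loop, with a hypothetical `Persistable` trait standing in for `ChannelManager`'s actual generics:

```rust
use std::time::Duration;

// Hypothetical stand-in for the relevant slice of ChannelManager's API.
trait Persistable {
    // Returns true if a persistence-relevant update occurred before timing
    // out, matching the await_persistable_update_timeout semantics tested above.
    fn await_persistable_update_timeout(&self, max_wait: Duration) -> bool;
    fn encode(&self) -> Vec<u8>;
}

fn persister_loop<P: Persistable>(manager: &P, rounds: usize) {
    for _ in 0..rounds {
        // Only touch disk when the manager reports a real change; SkipPersist
        // paths (e.g. channel_updates for unrelated channels) never wake us.
        if manager.await_persistable_update_timeout(Duration::from_millis(100)) {
            let _bytes = manager.encode();
            // ... write _bytes to durable storage here ...
        }
    }
}

struct QuietManager;
impl Persistable for QuietManager {
    fn await_persistable_update_timeout(&self, _max_wait: Duration) -> bool { false }
    fn encode(&self) -> Vec<u8> { Vec::new() }
}

fn main() { persister_loop(&QuietManager, 3); }
```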
index 78ffcfd1de522d7c84eeefe347e1671ee3f50a88..9e395f1107b1d62bfc26da2560b78e389987938c 100644 (file)
@@ -27,7 +27,7 @@ use ln::wire;
 use ln::wire::Encode;
 use util::byte_utils;
 use util::events::{MessageSendEvent, MessageSendEventsProvider};
-use util::logger::{Logger, Level};
+use util::logger::Logger;
 use routing::network_graph::NetGraphMsgHandler;
 
 use prelude::*;
index 1bdbff0333a520e0aced1526272d64bb94e75c40..dbb4178fb50e4643db6e2332d0e257bab18f8e29 100644 (file)
@@ -160,7 +160,6 @@ impl Writeable for Event {
                                write_tlv_fields!(writer, {
                                        (0, payment_preimage, required),
                                });
-                               payment_preimage.write(writer)?;
                        },
                        &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest,
                                #[cfg(test)]
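
The removed line above is the root cause of the serialization note in the 0.0.99 CHANGELOG entry: `payment_preimage` was written once inside the TLV block by `write_tlv_fields!` and then a second time as raw bytes, leaving 32 stray bytes that readers of the TLV stream never consume. A toy illustration of why a duplicate raw write corrupts a length-framed stream (hypothetical helpers, not rust-lightning's actual serialization code):

```rust
// Toy length-prefixed framing to show how a duplicate raw write breaks readers.
fn write_framed(out: &mut Vec<u8>, payload: &[u8]) {
    out.push(payload.len() as u8); // length prefix
    out.extend_from_slice(payload);
}

fn read_framed(buf: &[u8]) -> (&[u8], &[u8]) {
    let len = buf[0] as usize;
    (&buf[1..1 + len], &buf[1 + len..]) // (payload, rest-of-stream)
}

fn main() {
    let preimage = [0x42u8; 32];

    // Correct writer: the preimage lives only inside the framed block.
    let mut good = Vec::new();
    write_framed(&mut good, &preimage);
    good.push(0xEE); // the next serialized field
    let (_payload, rest) = read_framed(&good);
    assert_eq!(rest, &[0xEE][..]); // reader is aligned with the next field

    // Buggy writer (the removed line): frame it, then write it again raw.
    let mut bad = Vec::new();
    write_framed(&mut bad, &preimage);
    bad.extend_from_slice(&preimage); // duplicate raw write
    bad.push(0xEE);
    let (_payload, rest) = read_framed(&bad);
    // The reader now sees 32 stray bytes where the next field should start,
    // consistent with the deserialization failure described in the CHANGELOG.
    assert_ne!(rest, &[0xEE][..]);
}
```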
index b4d274f683cb5b414b9a174e781feb978600e815..adaf631f8a93b4f63d68ae71bde3441e47635b92 100644 (file)
@@ -439,7 +439,7 @@ impl TestLogger {
 impl Logger for TestLogger {
        fn log(&self, record: &Record) {
                *self.lines.lock().unwrap().entry((record.module_path.to_string(), format!("{}", record.args))).or_insert(0) += 1;
-               if self.level >= record.level {
+               if record.level >= self.level {
                        println!("{:<5} {} [{} : {}, {}] {}", record.level.to_string(), self.id, record.module_path, record.file, record.line, record.args);
                }
        }
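
The `TestLogger` fix flips which side of the configured threshold passes the filter. A minimal sketch using the same comparison direction, with a hypothetical `Level` ordered by ascending severity (rust-lightning's own `Level` declaration is not reproduced here):

```rust
// Hypothetical severity-ordered level enum; derive(PartialOrd) makes later
// variants compare greater, so Error > Warn > Info > Debug > Trace.
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd)]
enum Level { Trace, Debug, Info, Warn, Error }

struct Record { level: Level, msg: &'static str }

// Print a record only if it is at least as severe as the configured floor,
// mirroring the fixed `record.level >= self.level` comparison above.
fn should_log(floor: Level, record: &Record) -> bool {
    record.level >= floor
}

fn main() {
    let floor = Level::Info;
    assert!(should_log(floor, &Record { level: Level::Error, msg: "shown" }));
    assert!(should_log(floor, &Record { level: Level::Info, msg: "shown" }));
    assert!(!should_log(floor, &Record { level: Level::Debug, msg: "hidden" }));
}
```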