Use ln OutPoints not bitcoin ones in SpendableOutputDescriptors
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 5466b3d23c8c96dafd651003ce85c268964d632a..83746d01a94c86d1d062c61f9f36b6e5224f5f9f 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -1,3 +1,12 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
 //! The top-level channel management and payment tracking stuff lives here.
 //!
 //! The ChannelManager is the main chunk of logic implementing the lightning protocol and is
@@ -29,14 +38,16 @@ use bitcoin::secp256k1;
 use chain::chaininterface::{BroadcasterInterface,ChainListener,FeeEstimator};
 use chain::transaction::OutPoint;
 use ln::channel::{Channel, ChannelError};
-use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, ManyChannelMonitor, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
+use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, ManyChannelMonitor, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent};
 use ln::features::{InitFeatures, NodeFeatures};
 use routing::router::{Route, RouteHop};
 use ln::msgs;
+use ln::msgs::NetAddress;
 use ln::onion_utils;
 use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, OptionalField};
 use chain::keysinterface::{ChannelKeys, KeysInterface, KeysManager, InMemoryChannelKeys};
 use util::config::UserConfig;
+use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
 use util::{byte_utils, events};
 use util::ser::{Readable, ReadableArgs, MaybeReadable, Writeable, Writer};
 use util::chacha20::{ChaCha20, ChaChaReader};
@@ -303,7 +314,7 @@ pub(super) struct ChannelHolder<ChanSigner: ChannelKeys> {
        claimable_htlcs: HashMap<(PaymentHash, Option<PaymentSecret>), Vec<ClaimableHTLC>>,
        /// Messages to send to peers - pushed to in the same lock that they are generated in (except
        /// for broadcast messages, where ordering isn't as strict).
-       pub(super) pending_msg_events: Vec<events::MessageSendEvent>,
+       pub(super) pending_msg_events: Vec<MessageSendEvent>,
 }
 
 /// State we hold per-peer. In the future we should put channels in here, but for now we only hold
@@ -1474,7 +1485,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
        // be absurd. We ensure this by checking that at least 500 (our stated public contract on when
        // broadcast_node_announcement panics) of the maximum-length addresses would fit in a 64KB
        // message...
-       const HALF_MESSAGE_IS_ADDRS: u32 = ::std::u16::MAX as u32 / (msgs::NetAddress::MAX_LEN as u32 + 1) / 2;
+       const HALF_MESSAGE_IS_ADDRS: u32 = ::std::u16::MAX as u32 / (NetAddress::MAX_LEN as u32 + 1) / 2;
        #[deny(const_err)]
        #[allow(dead_code)]
        // ...by failing to compile if the number of addresses that would be half of a message is
@@ -1494,7 +1505,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
        /// only Tor Onion addresses.
        ///
        /// Panics if addresses is absurdly large (more than 500).
-       pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], addresses: Vec<msgs::NetAddress>) {
+       pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], addresses: Vec<NetAddress>) {
                let _ = self.total_consistency_lock.read().unwrap();
 
                if addresses.len() > 500 {
@@ -1822,6 +1833,44 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                } else { false }
        }
 
+       // Fail a list of HTLCs that were just freed from the holding cell. The HTLCs need to be
+       // failed backwards or, if they were one of our outgoing HTLCs, then their failure needs to
+       // be surfaced to the user.
+       fn fail_holding_cell_htlcs(&self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: [u8; 32]) {
+               for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) {
+                       match htlc_src {
+                               HTLCSource::PreviousHopData(HTLCPreviousHopData { .. }) => {
+                                       let (failure_code, onion_failure_data) =
+                                               match self.channel_state.lock().unwrap().by_id.entry(channel_id) {
+                                                       hash_map::Entry::Occupied(chan_entry) => {
+                                                               if let Ok(upd) = self.get_channel_update(&chan_entry.get()) {
+                                                                       (0x1000|7, upd.encode_with_len())
+                                                               } else {
+                                                                       (0x4000|10, Vec::new())
+                                                               }
+                                                       },
+                                                       hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
+                                               };
+                                       let channel_state = self.channel_state.lock().unwrap();
+                                       self.fail_htlc_backwards_internal(channel_state,
+                                               htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data});
+                               },
+                               HTLCSource::OutboundRoute { .. } => {
+                                       self.pending_events.lock().unwrap().push(
+                                               events::Event::PaymentFailed {
+                                                       payment_hash,
+                                                       rejected_by_dest: false,
+#[cfg(test)]
+                                                       error_code: None,
+#[cfg(test)]
+                                                       error_data: None,
+                                               }
+                                       )
+                               },
+                       };
+               }
+       }
+
        /// Fails an HTLC backwards to the sender of it to us.
        /// Note that while we take a channel_state lock as input, we do *not* assume consistency here.
        /// There are several callsites that do stupid things like loop over a list of payment_hashes
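
The failure codes picked in fail_holding_cell_htlcs above follow BOLT 4: 0x1000|7 (temporary_channel_failure, the UPDATE flag plus an encoded channel_update) when a channel_update is available, and 0x4000|10 (unknown_next_peer, a PERM failure with empty data) when the channel is already gone. A minimal sketch of that choice, using a hypothetical holding_cell_failure helper rather than anything from the patch:

// Hypothetical helper, for illustration only; not part of the patch.
const UPDATE: u16 = 0x1000;
const PERM: u16 = 0x4000;

/// Mirrors the match in fail_holding_cell_htlcs: attach the serialized
/// channel_update when we have one, otherwise report unknown_next_peer
/// with no data.
fn holding_cell_failure(channel_update: Option<Vec<u8>>) -> (u16, Vec<u8>) {
    match channel_update {
        Some(encoded_update) => (UPDATE | 7, encoded_update), // temporary_channel_failure
        None => (PERM | 10, Vec::new()),                      // unknown_next_peer
    }
}

fn main() {
    assert_eq!(holding_cell_failure(None), (0x4000 | 10, Vec::new()));
    assert_eq!(holding_cell_failure(Some(vec![0u8; 8])).0, 0x1000 | 7);
}
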
@@ -2670,23 +2719,27 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
        }
 
        fn internal_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
-               let (pending_forwards, mut pending_failures, short_channel_id) = {
+               let mut htlcs_to_fail = Vec::new();
+               let res = loop {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
                        match channel_state.by_id.entry(msg.channel_id) {
                                hash_map::Entry::Occupied(mut chan) => {
                                        if chan.get().get_their_node_id() != *their_node_id {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
+                                               break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                        }
                                        let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
-                                       let (commitment_update, pending_forwards, pending_failures, closing_signed, monitor_update) =
-                                               try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger), channel_state, chan);
+                                       let (commitment_update, pending_forwards, pending_failures, closing_signed, monitor_update, htlcs_to_fail_in) =
+                                               break_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger), channel_state, chan);
+                                       htlcs_to_fail = htlcs_to_fail_in;
                                        if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
                                                if was_frozen_for_monitor {
                                                        assert!(commitment_update.is_none() && closing_signed.is_none() && pending_forwards.is_empty() && pending_failures.is_empty());
-                                                       return Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA".to_owned()));
+                                                       break Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA".to_owned()));
                                                } else {
-                                                       return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, commitment_update.is_some(), pending_forwards, pending_failures);
+                                                       if let Err(e) = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, commitment_update.is_some(), pending_forwards, pending_failures) {
+                                                               break Err(e);
+                                                       } else { unreachable!(); }
                                                }
                                        }
                                        if let Some(updates) = commitment_update {
@@ -2701,17 +2754,22 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                                        msg,
                                                });
                                        }
-                                       (pending_forwards, pending_failures, chan.get().get_short_channel_id().expect("RAA should only work on a short-id-available channel"))
+                                       break Ok((pending_forwards, pending_failures, chan.get().get_short_channel_id().expect("RAA should only work on a short-id-available channel")))
                                },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+                               hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                        }
                };
-               for failure in pending_failures.drain(..) {
-                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
+               self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id);
+               match res {
+                       Ok((pending_forwards, mut pending_failures, short_channel_id)) => {
+                               for failure in pending_failures.drain(..) {
+                                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
+                               }
+                               self.forward_htlcs(&mut [(short_channel_id, pending_forwards)]);
+                               Ok(())
+                       },
+                       Err(e) => Err(e)
                }
-               self.forward_htlcs(&mut [(short_channel_id, pending_forwards)]);
-
-               Ok(())
        }
 
        fn internal_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> {
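
The rewrite above from early `return`s to a `loop { ... break ... }` lets internal_revoke_and_ack drop channel_state_lock before calling fail_holding_cell_htlcs (which takes the channel_state lock itself), and only then propagate the result. A minimal standalone sketch of that pattern, using a hypothetical Manager type rather than rust-lightning's:

use std::sync::Mutex;

struct Manager {
    state: Mutex<Vec<u32>>,
}

impl Manager {
    fn handle_msg(&self, channel: u32) -> Result<(), String> {
        let mut htlcs_to_fail = Vec::new();
        let res = loop {
            let mut state = self.state.lock().unwrap();
            if !state.contains(&channel) {
                break Err(format!("unknown channel {}", channel));
            }
            // Pretend the channel handed us holding-cell HTLCs to fail back.
            htlcs_to_fail.push(channel);
            state.retain(|c| *c != channel);
            break Ok(());
        }; // the MutexGuard is dropped on break, before any follow-up work
        // Now it is safe to take the lock again, as fail_holding_cell_htlcs
        // does in the real code.
        for htlc in htlcs_to_fail.drain(..) {
            println!("failing freed holding-cell HTLC on channel {}", htlc);
        }
        res
    }
}

fn main() {
    let manager = Manager { state: Mutex::new(vec![1, 2, 3]) };
    assert!(manager.handle_msg(2).is_ok());
    assert!(manager.handle_msg(9).is_err());
}
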
@@ -2792,6 +2850,10 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                if chan.get().get_their_node_id() != *their_node_id {
                                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                }
+                               // Currently, we expect all holding cell update_adds to be dropped on peer
+                               // disconnect, so Channel's reestablish will never hand us any holding cell
+                               // freed HTLCs to fail backwards. If in the future we no longer drop pending
+                               // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
                                let (funding_locked, revoke_and_ack, commitment_update, monitor_update_opt, mut order, shutdown) =
                                        try_chan_entry!(self, chan.get_mut().channel_reestablish(msg, &self.logger), channel_state, chan);
                                if let Some(monitor_update) = monitor_update_opt {
@@ -2906,31 +2968,61 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        Err(e) => { Err(APIError::APIMisuseError { err: e.err })}
                }
        }
+
+       /// Process pending events from the ManyChannelMonitor.
+       fn process_pending_monitor_events(&self) {
+               let mut failed_channels = Vec::new();
+               {
+                       for monitor_event in self.monitor.get_and_clear_pending_monitor_events() {
+                               match monitor_event {
+                                       MonitorEvent::HTLCEvent(htlc_update) => {
+                                               if let Some(preimage) = htlc_update.payment_preimage {
+                                                       log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
+                                                       self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
+                                               } else {
+                                                       log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
+                                                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+                                               }
+                                       },
+                                       MonitorEvent::CommitmentTxBroadcasted(funding_outpoint) => {
+                                               let mut channel_lock = self.channel_state.lock().unwrap();
+                                               let channel_state = &mut *channel_lock;
+                                               let by_id = &mut channel_state.by_id;
+                                               let short_to_id = &mut channel_state.short_to_id;
+                                               let pending_msg_events = &mut channel_state.pending_msg_events;
+                                               if let Some(mut chan) = by_id.remove(&funding_outpoint.to_channel_id()) {
+                                                       if let Some(short_id) = chan.get_short_channel_id() {
+                                                               short_to_id.remove(&short_id);
+                                                       }
+                                                       failed_channels.push(chan.force_shutdown(false));
+                                                       if let Ok(update) = self.get_channel_update(&chan) {
+                                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                       msg: update
+                                                               });
+                                                       }
+                                               }
+                                       },
+                               }
+                       }
+               }
+
+               for failure in failed_channels.drain(..) {
+                       self.finish_force_close_channel(failure);
+               }
+       }
 }
 
-impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> events::MessageSendEventsProvider for ChannelManager<ChanSigner, M, T, K, F, L>
+impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<ChanSigner, M, T, K, F, L>
        where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
         T::Target: BroadcasterInterface,
         K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
         F::Target: FeeEstimator,
                                L::Target: Logger,
 {
-       fn get_and_clear_pending_msg_events(&self) -> Vec<events::MessageSendEvent> {
-               // TODO: Event release to users and serialization is currently race-y: it's very easy for a
-               // user to serialize a ChannelManager with pending events in it and lose those events on
-               // restart. This is doubly true for the fail/fulfill-backs from monitor events!
-               {
-                       //TODO: This behavior should be documented.
-                       for htlc_update in self.monitor.get_and_clear_pending_htlcs_updated() {
-                               if let Some(preimage) = htlc_update.payment_preimage {
-                                       log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
-                                       self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
-                               } else {
-                                       log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
-                                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
-                               }
-                       }
-               }
+       fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
+               //TODO: This behavior should be documented. It's non-intuitive that we query
+               // ChannelMonitors when clearing other events.
+               self.process_pending_monitor_events();
 
                let mut ret = Vec::new();
                let mut channel_state = self.channel_state.lock().unwrap();
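
The new process_pending_monitor_events above replaces the per-HTLC loops that previously lived in both event-provider implementations. A simplified standalone sketch of its dispatch, with the event payloads reduced to plain data (the real MonitorEvent wraps an HTLCUpdate or a funding OutPoint):

// Simplified stand-in types, for illustration only; not the real definitions.
enum MonitorEvent {
    HTLCEvent { payment_hash: [u8; 32], payment_preimage: Option<[u8; 32]> },
    CommitmentTxBroadcasted { channel_id: [u8; 32] },
}

fn process_pending_monitor_events(events: Vec<MonitorEvent>) {
    for event in events {
        match event {
            // A preimage means the HTLC was claimed on-chain: claim it backwards.
            MonitorEvent::HTLCEvent { payment_hash, payment_preimage: Some(_) } => {
                println!("claiming HTLC {:02x}.. backwards", payment_hash[0]);
            }
            // No preimage: fail it backwards with 0x4000|8 (permanent_channel_failure).
            MonitorEvent::HTLCEvent { payment_hash, payment_preimage: None } => {
                println!("failing HTLC {:02x}.. backwards", payment_hash[0]);
            }
            // A commitment transaction hit the chain: the ChannelManager must
            // drop the channel, force-shut it down, and broadcast an update.
            MonitorEvent::CommitmentTxBroadcasted { channel_id } => {
                println!("force-closing channel {:02x}..", channel_id[0]);
            }
        }
    }
}

fn main() {
    process_pending_monitor_events(vec![
        MonitorEvent::HTLCEvent { payment_hash: [1; 32], payment_preimage: None },
        MonitorEvent::CommitmentTxBroadcasted { channel_id: [2; 32] },
    ]);
}
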
@@ -2939,29 +3031,17 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
        }
 }
 
-impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> events::EventsProvider for ChannelManager<ChanSigner, M, T, K, F, L>
+impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> EventsProvider for ChannelManager<ChanSigner, M, T, K, F, L>
        where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
         T::Target: BroadcasterInterface,
         K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
         F::Target: FeeEstimator,
                                L::Target: Logger,
 {
-       fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
-               // TODO: Event release to users and serialization is currently race-y: it's very easy for a
-               // user to serialize a ChannelManager with pending events in it and lose those events on
-               // restart. This is doubly true for the fail/fulfill-backs from monitor events!
-               {
-                       //TODO: This behavior should be documented.
-                       for htlc_update in self.monitor.get_and_clear_pending_htlcs_updated() {
-                               if let Some(preimage) = htlc_update.payment_preimage {
-                                       log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
-                                       self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
-                               } else {
-                                       log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
-                                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
-                               }
-                       }
-               }
+       fn get_and_clear_pending_events(&self) -> Vec<Event> {
+               //TODO: This behavior should be documented. It's non-intuitive that we query
+               // ChannelMonitors when clearing other events.
+               self.process_pending_monitor_events();
 
                let mut ret = Vec::new();
                let mut pending_events = self.pending_events.lock().unwrap();
@@ -3044,21 +3124,6 @@ impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send, T: Deref + Sync + Send, K:
                                                }
                                        }
                                }
-                               if channel.is_funding_initiated() && channel.channel_monitor().would_broadcast_at_height(height, &self.logger) {
-                                       if let Some(short_id) = channel.get_short_channel_id() {
-                                               short_to_id.remove(&short_id);
-                                       }
-                                       // If would_broadcast_at_height() is true, the channel_monitor will broadcast
-                                       // the latest local tx for us, so we should skip that here (it doesn't really
-                                       // hurt anything, but does make tests a bit simpler).
-                                       failed_channels.push(channel.force_shutdown(false));
-                                       if let Ok(update) = self.get_channel_update(&channel) {
-                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                       msg: update
-                                               });
-                                       }
-                                       return false;
-                               }
                                true
                        });
 
@@ -3258,6 +3323,10 @@ impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send, T: Deref + Sync + Send, K:
                                log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates", log_pubkey!(their_node_id));
                                channel_state.by_id.retain(|_, chan| {
                                        if chan.get_their_node_id() == *their_node_id {
+                                               // Note that currently on channel reestablish we assert that there are no
+                                               // holding cell add-HTLCs, so if in the future we stop removing uncommitted HTLCs
+                                               // on peer disconnect here, there will need to be corresponding changes in
+                                               // reestablish logic.
                                                let failed_adds = chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
                                                chan.to_disabled_marked();
                                                if !failed_adds.is_empty() {