Fix build warnings without grind_signatures
rust-lightning: lightning/src/ln/channelmanager.rs
index 0ca18da34856ae343edace6aa85dc9ec1937f921..7acd99e3eba491baca576de6e9282d2317907385 100644
@@ -69,6 +69,7 @@ use core::ops::Deref;
 
 #[cfg(any(test, feature = "std"))]
 use std::time::Instant;
+use util::crypto::sign;
 
 mod inbound_payment {
        use alloc::string::ToString;
@@ -441,6 +442,7 @@ struct ClaimableHTLC {
        cltv_expiry: u32,
        value: u64,
        onion_payload: OnionPayload,
+       timer_ticks: u8,
 }
 
 /// A payment identifier used to uniquely identify a payment to LDK.
@@ -496,6 +498,7 @@ impl core::hash::Hash for HTLCSource {
                }
        }
 }
+#[cfg(not(feature = "grind_signatures"))]
 #[cfg(test)]
 impl HTLCSource {
        pub fn dummy() -> Self {
@@ -886,6 +889,8 @@ impl PendingOutboundPayment {
 /// issues such as overly long function definitions. Note that the ChannelManager can take any
 /// type that implements KeysInterface for its keys manager, but this type alias chooses the
 /// concrete type of the KeysManager.
+///
+/// (C-not exported) as Arcs don't make sense in bindings
 pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<InMemorySigner, Arc<M>, Arc<T>, Arc<KeysManager>, Arc<F>, Arc<L>>;
 
 /// SimpleRefChannelManager is a type alias for a ChannelManager reference, and is the reference
@@ -896,6 +901,8 @@ pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<InMemorySigner, Ar
 /// helps with issues such as long function definitions. Note that the ChannelManager can take any
 /// type that implements KeysInterface for its keys manager, but this type alias chooses the
 /// concrete type of the KeysManager.
+///
+/// (C-not exported) as Arcs don't make sense in bindings
 pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManager<InMemorySigner, &'a M, &'b T, &'c KeysManager, &'d F, &'e L>;
 
 /// Manager which keeps track of a number of channels and sends messages to the appropriate
@@ -1148,6 +1155,9 @@ const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_G
 /// pending HTLCs in flight.
 pub(crate) const PAYMENT_EXPIRY_BLOCKS: u32 = 3;
 
+/// The number of ticks of [`ChannelManager::timer_tick_occurred`] until expiry of incomplete MPPs
+pub(crate) const MPP_TIMEOUT_TICKS: u8 = 3;
+
 /// Information needed for constructing an invoice route hint for this channel.
 #[derive(Clone, Debug, PartialEq)]
 pub struct CounterpartyForwardingInfo {
@@ -1218,6 +1228,9 @@ pub struct ChannelDetails {
        /// counterparty will recognize the alias provided here in place of the [`short_channel_id`]
        /// when they see a payment to be routed to us.
        ///
+       /// Our counterparty may choose to rotate this value at any time, though will always recognize
+       /// previous values for inbound payment forwarding.
+       ///
        /// [`short_channel_id`]: Self::short_channel_id
        pub inbound_scid_alias: Option<u64>,
        /// The value, in satoshis, of this channel as appears in the funding output
@@ -1306,9 +1319,12 @@ pub struct ChannelDetails {
 }
 
 impl ChannelDetails {
-       /// Gets the SCID which should be used to identify this channel for inbound payments. This
-       /// should be used for providing invoice hints or in any other context where our counterparty
-       /// will forward a payment to us.
+       /// Gets the current SCID which should be used to identify this channel for inbound payments.
+       /// This should be used for providing invoice hints or in any other context where our
+       /// counterparty will forward a payment to us.
+       ///
+       /// This is either the [`ChannelDetails::inbound_scid_alias`], if set, or the
+       /// [`ChannelDetails::short_channel_id`]. See those for more information.
        pub fn get_inbound_payment_scid(&self) -> Option<u64> {
                self.inbound_scid_alias.or(self.short_channel_id)
        }
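
Note (annotation, not part of the patch): a short usage sketch showing how this getter is meant to be consumed when building invoice route hints; `hint_scids` is an illustrative helper, not an LDK API:

    use lightning::ln::channelmanager::ChannelDetails;

    // Collect the SCIDs a payer should be told to use for the final hop: the
    // rotating inbound alias when one exists, the real SCID otherwise.
    fn hint_scids(channels: &[ChannelDetails]) -> Vec<u64> {
        channels.iter()
            .filter(|c| c.is_usable)
            .filter_map(|c| c.get_inbound_payment_scid())
            .collect()
    }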
@@ -2413,7 +2429,6 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        };
                                        let (chan_update_opt, forwardee_cltv_expiry_delta) = if let Some(forwarding_id) = forwarding_id_opt {
                                                let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap();
-                                               let chan_update_opt = self.get_channel_update_for_broadcast(chan).ok();
                                                if !chan.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
                                                        // Note that the behavior here should be identical to the above block - we
                                                        // should NOT reveal the existence or non-existence of a private channel if
@@ -2426,6 +2441,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                        // we don't have the channel here.
                                                        break Some(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10, None));
                                                }
+                                               let chan_update_opt = self.get_channel_update_for_onion(*short_channel_id, chan).ok();
 
                                                // Note that we could technically not return an error yet here and just hope
                                                // that the connection is reestablished or monitor updated by the time we get
@@ -2475,21 +2491,22 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        break None;
                                }
                                {
-                                       let mut res = Vec::with_capacity(8 + 128);
+                                       let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 8 + 2));
                                        if let Some(chan_update) = chan_update {
                                                if code == 0x1000 | 11 || code == 0x1000 | 12 {
-                                                       res.extend_from_slice(&byte_utils::be64_to_array(msg.amount_msat));
+                                                       msg.amount_msat.write(&mut res).expect("Writes cannot fail");
                                                }
                                                else if code == 0x1000 | 13 {
-                                                       res.extend_from_slice(&byte_utils::be32_to_array(msg.cltv_expiry));
+                                                       msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
                                                }
                                                else if code == 0x1000 | 20 {
                                                        // TODO: underspecified, follow https://github.com/lightningnetwork/lightning-rfc/issues/791
-                                                       res.extend_from_slice(&byte_utils::be16_to_array(0));
+                                                       0u16.write(&mut res).expect("Writes cannot fail");
                                                }
-                                               res.extend_from_slice(&chan_update.encode_with_len()[..]);
+                                               (chan_update.serialized_length() as u16).write(&mut res).expect("Writes cannot fail");
+                                               chan_update.write(&mut res).expect("Writes cannot fail");
                                        }
-                                       return_err!(err, code, &res[..]);
+                                       return_err!(err, code, &res.0[..]);
                                }
                        }
                }
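
Note (annotation, not part of the patch): the VecWriter rewrite produces the same BOLT 4 failure payload as the removed byte_utils code; only the serialization mechanism changes. An equivalent hand-rolled layout for an amount-related code (0x1000|11 or 0x1000|12), with `failure_data` as an illustrative name:

    // [8 bytes] htlc amount_msat (big-endian)
    // [2 bytes] channel_update length (big-endian)
    // [n bytes] the serialized channel_update
    fn failure_data(amount_msat: u64, chan_update_bytes: &[u8]) -> Vec<u8> {
        let mut res = Vec::with_capacity(8 + 2 + chan_update_bytes.len());
        res.extend_from_slice(&amount_msat.to_be_bytes());
        res.extend_from_slice(&(chan_update_bytes.len() as u16).to_be_bytes());
        res.extend_from_slice(chan_update_bytes);
        res
    }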
@@ -2525,6 +2542,10 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        Some(id) => id,
                };
 
+               self.get_channel_update_for_onion(short_channel_id, chan)
+       }
+       fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+               log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.channel_id()));
                let were_node_one = PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key).serialize()[..] < chan.get_counterparty_node_id().serialize()[..];
 
                let unsigned = msgs::UnsignedChannelUpdate {
@@ -3063,7 +3084,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        excess_data: Vec::new(),
                };
                let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);
-               let node_announce_sig = self.secp_ctx.sign(&msghash, &self.our_network_key);
+               let node_announce_sig = sign(&self.secp_ctx, &msghash, &self.our_network_key);
 
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
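
Note (annotation, not part of the patch): the `sign(...)` call above replaces a direct `self.secp_ctx.sign(...)` so the same call site builds whether or not the `grind_signatures` feature is enabled, which is the build-warning fix named in the subject line. A rough sketch of the shape such a wrapper takes, assuming the feature simply switches to low-R signature grinding:

    use bitcoin::secp256k1::{Message, Secp256k1, SecretKey, Signature, Signing};

    // Sketch only: picks the grinding signer when the feature is on, the plain
    // one otherwise, so callers never need their own cfg gates.
    pub fn sign<C: Signing>(ctx: &Secp256k1<C>, msg: &Message, sk: &SecretKey) -> Signature {
        #[cfg(feature = "grind_signatures")]
        let sig = ctx.sign_low_r(msg, sk);
        #[cfg(not(feature = "grind_signatures"))]
        let sig = ctx.sign(msg, sk);
        sig
    }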
@@ -3214,7 +3235,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                        } else {
                                                                                                panic!("Stated return value requirements in send_htlc() were not met");
                                                                                        }
-                                                                                       let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, chan.get());
+                                                                                       let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get());
                                                                                        failed_forwards.push((htlc_source, payment_hash,
                                                                                                HTLCFailReason::Reason { failure_code, data }
                                                                                        ));
@@ -3337,6 +3358,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                phantom_shared_secret,
                                                                        },
                                                                        value: amt_to_forward,
+                                                                       timer_ticks: 0,
                                                                        cltv_expiry,
                                                                        onion_payload,
                                                                };
@@ -3631,6 +3653,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
 
                        let mut handle_errors = Vec::new();
+                       let mut timed_out_mpp_htlcs = Vec::new();
                        {
                                let mut channel_state_lock = self.channel_state.lock().unwrap();
                                let channel_state = &mut *channel_state_lock;
@@ -3679,6 +3702,32 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
                                        true
                                });
+
+                               channel_state.claimable_htlcs.retain(|payment_hash, htlcs| {
+                                       if htlcs.is_empty() {
+                                               // This should be unreachable
+                                               debug_assert!(false);
+                                               return false;
+                                       }
+                                       if let OnionPayload::Invoice(ref final_hop_data) = htlcs[0].onion_payload {
+                                               // Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat).
+                                               // In this case we're not going to handle any timeouts of the parts here.
+                                               if final_hop_data.total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
+                                                       return true;
+                                               } else if htlcs.into_iter().any(|htlc| {
+                                                       htlc.timer_ticks += 1;
+                                                       return htlc.timer_ticks >= MPP_TIMEOUT_TICKS
+                                               }) {
+                                                       timed_out_mpp_htlcs.extend(htlcs.into_iter().map(|htlc| (htlc.prev_hop.clone(), payment_hash.clone())));
+                                                       return false;
+                                               }
+                                       }
+                                       true
+                               });
+                       }
+
+                       for htlc_source in timed_out_mpp_htlcs.drain(..) {
+                               self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), HTLCSource::PreviousHopData(htlc_source.0), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() });
                        }
 
                        for (err, counterparty_node_id) in handle_errors.drain(..) {
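
Note (annotation, not part of the patch): `timer_ticks` only advances when `timer_tick_occurred` runs, so `MPP_TIMEOUT_TICKS` counts timer ticks rather than wall-clock time. A minimal driver sketch under the usual once-per-minute cadence documented for `timer_tick_occurred` (`channel_manager` is an assumed in-scope ChannelManager); with it, an MPP that never completes is failed back after roughly three minutes:

    // Normally the background processor does this; shown here only to make the
    // tick arithmetic concrete. Each call bumps timer_ticks on stuck partial
    // payments, and after MPP_TIMEOUT_TICKS calls they are failed backwards.
    loop {
        std::thread::sleep(std::time::Duration::from_secs(60));
        channel_manager.timer_tick_occurred();
    }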
@@ -3714,9 +3763,32 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
        /// Gets an HTLC onion failure code and error data for an `UPDATE` error, given the error code
        /// that we want to return and a channel.
-       fn get_htlc_temp_fail_err_and_data(&self, desired_err_code: u16, chan: &Channel<Signer>) -> (u16, Vec<u8>) {
+       ///
+       /// This is for failures on the channel on which the HTLC was *received*, not failures
+       /// forwarding
+       fn get_htlc_inbound_temp_fail_err_and_data(&self, desired_err_code: u16, chan: &Channel<Signer>) -> (u16, Vec<u8>) {
+               // We can't be sure what SCID was used when relaying inbound towards us, so we have to
+               // guess somewhat. If its a public channel, we figure best to just use the real SCID (as
+               // we're not leaking that we have a channel with the counterparty), otherwise we try to use
+               // an inbound SCID alias before the real SCID.
+               let scid_pref = if chan.should_announce() {
+                       chan.get_short_channel_id().or(chan.latest_inbound_scid_alias())
+               } else {
+                       chan.latest_inbound_scid_alias().or(chan.get_short_channel_id())
+               };
+               if let Some(scid) = scid_pref {
+                       self.get_htlc_temp_fail_err_and_data(desired_err_code, scid, chan)
+               } else {
+                       (0x4000|10, Vec::new())
+               }
+       }
+
+
+       /// Gets an HTLC onion failure code and error data for an `UPDATE` error, given the error code
+       /// that we want to return and a channel.
+       fn get_htlc_temp_fail_err_and_data(&self, desired_err_code: u16, scid: u64, chan: &Channel<Signer>) -> (u16, Vec<u8>) {
                debug_assert_eq!(desired_err_code & 0x1000, 0x1000);
-               if let Ok(upd) = self.get_channel_update_for_unicast(chan) {
+               if let Ok(upd) = self.get_channel_update_for_onion(scid, chan) {
                        let mut enc = VecWriter(Vec::with_capacity(upd.serialized_length() + 4));
                        if desired_err_code == 0x1000 | 20 {
                                // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
@@ -3744,7 +3816,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        let (failure_code, onion_failure_data) =
                                                match self.channel_state.lock().unwrap().by_id.entry(channel_id) {
                                                        hash_map::Entry::Occupied(chan_entry) => {
-                                                               self.get_htlc_temp_fail_err_and_data(0x1000|7, &chan_entry.get())
+                                                               self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan_entry.get())
                                                        },
                                                        hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
                                                };
@@ -4282,8 +4354,13 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        ///
        /// The `temporary_channel_id` parameter indicates which inbound channel should be accepted.
        ///
-       /// [`Event::OpenChannelRequest`]: crate::util::events::Event::OpenChannelRequest
-       pub fn accept_inbound_channel(&self, temporary_channel_id: &[u8; 32]) -> Result<(), APIError> {
+       /// For inbound channels, the `user_channel_id` parameter will be provided back in
+       /// [`Event::ChannelClosed::user_channel_id`] to allow tracking of which events correspond
+       /// with which `accept_inbound_channel` call.
+       ///
+       /// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest
+       /// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id
+       pub fn accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], user_channel_id: u64) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
                let mut channel_state_lock = self.channel_state.lock().unwrap();
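
Note (annotation, not part of the patch): a usage sketch for the new `user_channel_id` parameter, assuming `manually_accept_inbound_channels` is set in the config and `next_id` is an application-side counter; the value passed here is what later comes back in `Event::ChannelClosed::user_channel_id`:

    // Illustrative handling of a manually-accepted inbound channel.
    if let Event::OpenChannelRequest { temporary_channel_id, .. } = event {
        let user_channel_id = next_id; // echoed back when the channel closes
        channel_manager
            .accept_inbound_channel(&temporary_channel_id, user_channel_id)
            .expect("unknown or already-accepted temporary_channel_id");
    }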
@@ -4295,7 +4372,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                }
                                channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
                                        node_id: channel.get().get_counterparty_node_id(),
-                                       msg: channel.get_mut().accept_inbound_channel(),
+                                       msg: channel.get_mut().accept_inbound_channel(user_channel_id),
                                });
                        }
                        hash_map::Entry::Vacant(_) => {
@@ -4336,7 +4413,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                if !self.default_configuration.manually_accept_inbound_channels {
                                        channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
                                                node_id: counterparty_node_id.clone(),
-                                               msg: channel.accept_inbound_channel(),
+                                               msg: channel.accept_inbound_channel(0),
                                        });
                                } else {
                                        let mut pending_events = self.pending_events.lock().unwrap();
@@ -4634,7 +4711,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        match pending_forward_info {
                                                PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
                                                        let reason = if (error_code & 0x1000) != 0 {
-                                                               let (real_code, error_data) = self.get_htlc_temp_fail_err_and_data(error_code, chan);
+                                                               let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
                                                                onion_utils::build_first_hop_failure_packet(incoming_shared_secret, real_code, &error_data)
                                                        } else {
                                                                onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &[])
@@ -5521,6 +5598,12 @@ where
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
                self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.genesis_hash.clone(), self.get_our_node_id(), &self.logger)
                        .map(|(a, b)| (a, Vec::new(), b)));
+
+               let last_best_block_height = self.best_block.read().unwrap().height();
+               if height < last_best_block_height {
+                       let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire);
+                       self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.genesis_hash.clone(), self.get_our_node_id(), &self.logger));
+               }
        }
 
        fn best_block_updated(&self, header: &BlockHeader, height: u32) {
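
Note (annotation, not part of the patch): the added block handles `transactions_confirmed` being called for a block below the current best tip, re-running the per-channel timer logic at the real tip height so an out-of-order confirmation cannot roll channel state backwards. An illustrative call pattern this protects, with `tip_header`/`old_header` and friends as assumed caller-side values:

    // A filter source may deliver a relevant transaction from an older block
    // only after the newer tip was already announced; this ordering is now safe.
    channel_manager.best_block_updated(&tip_header, tip_height);
    channel_manager.transactions_confirmed(&old_header, &old_txdata, old_height); // old_height < tip_height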
@@ -5627,8 +5710,8 @@ where
                                let res = f(channel);
                                if let Ok((funding_locked_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
                                        for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
-                                               let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
-                                               timed_out_htlcs.push((source, payment_hash,  HTLCFailReason::Reason {
+                                               let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
+                                               timed_out_htlcs.push((source, payment_hash, HTLCFailReason::Reason {
                                                        failure_code, data,
                                                }));
                                        }
@@ -5910,6 +5993,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                                        &events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
                                        &events::MessageSendEvent::SendShortIdsQuery { .. } => false,
                                        &events::MessageSendEvent::SendReplyChannelRange { .. } => false,
+                                       &events::MessageSendEvent::SendGossipTimestampFilter { .. } => false,
                                }
                        });
                }
@@ -6268,6 +6352,7 @@ impl Readable for ClaimableHTLC {
                };
                Ok(Self {
                        prev_hop: prev_hop.0.unwrap(),
+                       timer_ticks: 0,
                        value,
                        onion_payload,
                        cltv_expiry,
@@ -7342,8 +7427,8 @@ mod tests {
 
                let payer_pubkey = nodes[0].node.get_our_node_id();
                let payee_pubkey = nodes[1].node.get_our_node_id();
-               nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() });
-               nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
+               nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
+               nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
 
                let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
                let route_params = RouteParameters {
@@ -7386,8 +7471,8 @@ mod tests {
 
                let payer_pubkey = nodes[0].node.get_our_node_id();
                let payee_pubkey = nodes[1].node.get_our_node_id();
-               nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() });
-               nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
+               nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
+               nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
 
                let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
                let route_params = RouteParameters {
@@ -7552,8 +7637,8 @@ pub mod bench {
                });
                let node_b_holder = NodeHolder { node: &node_b };
 
-               node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: InitFeatures::known() });
-               node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: InitFeatures::known() });
+               node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
+               node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
                node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap();
                node_b.handle_open_channel(&node_a.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
                node_a.handle_accept_channel(&node_b.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));