X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=152de61fd8e56719f116393c44f55c9144773b36;hb=fce631ca21b8609e2db2a5fe5f1858a365e2de63;hp=66c785654ab3eb886658447a758eb30bc577d94b;hpb=8db9c50b8b58d01099cef83167594cdc5f4a7842;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 66c78565..152de61f 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -402,7 +402,7 @@ struct PendingInboundPayment {
 
 /// Stores the session_priv for each part of a payment that is still pending. For versions 0.0.102
 /// and later, also stores information for retrying the payment.
-enum PendingOutboundPayment {
+pub(crate) enum PendingOutboundPayment {
 	Legacy {
 		session_privs: HashSet<[u8; 32]>,
 	},
@@ -413,6 +413,8 @@ enum PendingOutboundPayment {
 		pending_amt_msat: u64,
 		/// The total payment amount across all paths, used to verify that a retry is not overpaying.
 		total_msat: u64,
+		/// Our best known block height at the time this payment was initiated.
+		starting_block_height: u32,
 	},
 }
 
@@ -1949,15 +1951,6 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 		let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash);
 
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-		let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap();
-		let payment = pending_outbounds.entry(payment_id).or_insert_with(|| PendingOutboundPayment::Retryable {
-			session_privs: HashSet::new(),
-			pending_amt_msat: 0,
-			payment_hash: *payment_hash,
-			payment_secret: *payment_secret,
-			total_msat: total_value,
-		});
-		assert!(payment.insert(session_priv_bytes, path.last().unwrap().fee_msat));
 
 		let err: Result<(), _> = loop {
 			let mut channel_lock = self.channel_state.lock().unwrap();
@@ -1975,12 +1968,27 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 					if !chan.get().is_live() {
 						return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!".to_owned()});
 					}
-					break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
-						path: path.clone(),
-						session_priv: session_priv.clone(),
-						first_hop_htlc_msat: htlc_msat,
-						payment_id,
-					}, onion_packet, &self.logger), channel_state, chan)
+					let send_res = break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(
+						htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
+							path: path.clone(),
+							session_priv: session_priv.clone(),
+							first_hop_htlc_msat: htlc_msat,
+							payment_id,
+						}, onion_packet, &self.logger),
+						channel_state, chan);
+
+					let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap();
+					let payment = pending_outbounds.entry(payment_id).or_insert_with(|| PendingOutboundPayment::Retryable {
+						session_privs: HashSet::new(),
+						pending_amt_msat: 0,
+						payment_hash: *payment_hash,
+						payment_secret: *payment_secret,
+						starting_block_height: self.best_block.read().unwrap().height(),
+						total_msat: total_value,
+					});
+					assert!(payment.insert(session_priv_bytes, path.last().unwrap().fee_msat));
+
+					send_res
 				} {
 					Some((update_add, commitment_signed, monitor_update)) => {
 						if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
@@ -2170,7 +2178,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 				}
 			} else {
 				return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
-					err: "Payment with ID {} not found".to_string()
+					err: format!("Payment with ID {} not found", log_bytes!(payment_id.0)),
 				}))
 			}
 		};
@@ -3008,9 +3016,6 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 						error_data: None,
 					}
 				);
-				if payment.get().remaining_parts() == 0 {
-					payment.remove();
-				}
 			}
 		} else {
 			log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
@@ -3048,7 +3053,6 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 				}
 				if sessions.get().remaining_parts() == 0 {
 					all_paths_failed = true;
-					sessions.remove();
 				}
 			} else {
 				log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
@@ -3545,7 +3549,16 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 					Err(e) => try_chan_entry!(self, Err(e), channel_state, chan),
 				};
 				if let Err(e) = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
-					return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
+					let mut res = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
+					if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
+						// We weren't able to watch the channel to begin with, so no updates should be made on
+						// it. Previously, full_stack_target found an (unreachable) panic when the
+						// monitor update contained within `shutdown_finish` was applied.
+						if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
+							shutdown_finish.0.take();
+						}
+					}
+					return res
 				}
 				funding_tx
 			},
@@ -4375,6 +4388,11 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 		self.process_pending_events(&event_handler);
 		events.into_inner()
 	}
+
+	#[cfg(test)]
+	pub fn has_pending_payments(&self) -> bool {
+		!self.pending_outbound_payments.lock().unwrap().is_empty()
+	}
 }
 
 impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<Signer, M, T, K, F, L>
@@ -4550,6 +4568,16 @@ where
 		payment_secrets.retain(|_, inbound_payment| {
 			inbound_payment.expiry_time > header.time as u64
 		});
+
+		let mut outbounds = self.pending_outbound_payments.lock().unwrap();
+		outbounds.retain(|_, payment| {
+			const PAYMENT_EXPIRY_BLOCKS: u32 = 3;
+			if payment.remaining_parts() != 0 { return true }
+			if let PendingOutboundPayment::Retryable { starting_block_height, .. } = payment {
+				return *starting_block_height + PAYMENT_EXPIRY_BLOCKS > height
+			}
+			true
+		});
 	}
 
 	fn get_relevant_txids(&self) -> Vec<Txid> {
@@ -5281,6 +5309,7 @@ impl_writeable_tlv_based_enum!(PendingOutboundPayment,
 		(4, payment_secret, option),
 		(6, total_msat, required),
 		(8, pending_amt_msat, required),
+		(10, starting_block_height, required),
 	},
 ;);
 
@@ -5639,6 +5668,16 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 				None => continue,
 			}
 		}
+		if forward_htlcs_count > 0 {
+			// If we have pending HTLCs to forward, assume we either dropped a
+			// `PendingHTLCsForwardable` or the user received it but never processed it as they
+			// shut down before the timer hit. Either way, set the time_forwardable to a small
+			// constant as enough time has likely passed that we should simply handle the forwards
+			// now, or at least after the user gets a chance to reconnect to our peers.
+			pending_events_read.push(events::Event::PendingHTLCsForwardable {
+				time_forwardable: Duration::from_secs(2),
+			});
+		}
 
 		let background_event_count: u64 = Readable::read(reader)?;
 		let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>()));