diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 55271bce..e39a778b 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -54,22 +54,23 @@ use ln::onion_utils;
 use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, OptionalField};
 use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner};
 use util::config::UserConfig;
-use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
+use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
 use util::{byte_utils, events};
 use util::ser::{Readable, ReadableArgs, MaybeReadable, Writeable, Writer};
 use util::chacha20::{ChaCha20, ChaChaReader};
 use util::logger::Logger;
 use util::errors::APIError;

-use std::{cmp, mem};
+use core::{cmp, mem};
+use std::cell::RefCell;
 use std::collections::{HashMap, hash_map, HashSet};
 use std::io::{Cursor, Read};
 use std::sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard};
-use std::sync::atomic::{AtomicUsize, Ordering};
-use std::time::Duration;
+use core::sync::atomic::{AtomicUsize, Ordering};
+use core::time::Duration;
 #[cfg(any(test, feature = "allow_wallclock_use"))]
 use std::time::Instant;
-use std::ops::Deref;
+use core::ops::Deref;
 use bitcoin::hashes::hex::ToHex;

 // We hold various information about HTLC relay in the HTLC objects in Channel itself:
@@ -766,22 +767,44 @@ macro_rules! handle_error {
 	}
 }

+/// Returns (boolean indicating if we should remove the Channel object from memory, a mapped error)
+macro_rules! convert_chan_err {
+	($self: ident, $err: expr, $short_to_id: expr, $channel: expr, $channel_id: expr) => {
+		match $err {
+			ChannelError::Ignore(msg) => {
+				(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone()))
+			},
+			ChannelError::Close(msg) => {
+				log_trace!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg);
+				if let Some(short_id) = $channel.get_short_channel_id() {
+					$short_to_id.remove(&short_id);
+				}
+				let shutdown_res = $channel.force_shutdown(true);
+				(true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update(&$channel).ok()))
+			},
+			ChannelError::CloseDelayBroadcast(msg) => {
+				log_error!($self.logger, "Channel {} needs to be shut down, but closing transactions were not broadcast due to {}", log_bytes!($channel_id[..]), msg);
+				if let Some(short_id) = $channel.get_short_channel_id() {
+					$short_to_id.remove(&short_id);
+				}
+				let shutdown_res = $channel.force_shutdown(false);
+				(true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update(&$channel).ok()))
+			}
+		}
+	}
+}
+
 macro_rules! break_chan_entry {
 	($self: ident, $res: expr, $channel_state: expr, $entry: expr) => {
 		match $res {
 			Ok(res) => res,
-			Err(ChannelError::Ignore(msg)) => {
-				break Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone()))
-			},
-			Err(ChannelError::Close(msg)) => {
-				log_trace!($self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!($entry.key()[..]), msg);
-				let (channel_id, mut chan) = $entry.remove_entry();
-				if let Some(short_id) = chan.get_short_channel_id() {
-					$channel_state.short_to_id.remove(&short_id);
+			Err(e) => {
+				let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_id, $entry.get_mut(), $entry.key());
+				if drop {
+					$entry.remove_entry();
 				}
-				break Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()))
-			},
-			Err(ChannelError::CloseDelayBroadcast(_)) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
+				break Err(res);
+			}
 		}
 	}
 }

@@ -790,25 +813,12 @@ macro_rules! try_chan_entry {
 	($self: ident, $res: expr, $channel_state: expr, $entry: expr) => {
 		match $res {
 			Ok(res) => res,
-			Err(ChannelError::Ignore(msg)) => {
-				return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone()))
-			},
-			Err(ChannelError::Close(msg)) => {
-				log_trace!($self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!($entry.key()[..]), msg);
-				let (channel_id, mut chan) = $entry.remove_entry();
-				if let Some(short_id) = chan.get_short_channel_id() {
-					$channel_state.short_to_id.remove(&short_id);
-				}
-				return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()))
-			},
-			Err(ChannelError::CloseDelayBroadcast(msg)) => {
-				log_error!($self.logger, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($entry.key()[..]), msg);
-				let (channel_id, mut chan) = $entry.remove_entry();
-				if let Some(short_id) = chan.get_short_channel_id() {
-					$channel_state.short_to_id.remove(&short_id);
+			Err(e) => {
+				let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_id, $entry.get_mut(), $entry.key());
+				if drop {
+					$entry.remove_entry();
 				}
-				let shutdown_res = chan.force_shutdown(false);
-				return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, $self.get_channel_update(&chan).ok()))
+				return Err(res);
 			}
 		}
 	}
@@ -818,13 +828,12 @@ macro_rules! handle_monitor_err {
 	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
 		handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new())
 	};
-	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
+	($self: ident, $err: expr, $short_to_id: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr, $chan_id: expr) => {
 		match $err {
 			ChannelMonitorUpdateErr::PermanentFailure => {
-				log_error!($self.logger, "Closing channel {} due to monitor update PermanentFailure", log_bytes!($entry.key()[..]));
-				let (channel_id, mut chan) = $entry.remove_entry();
-				if let Some(short_id) = chan.get_short_channel_id() {
-					$channel_state.short_to_id.remove(&short_id);
+				log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateErr::PermanentFailure", log_bytes!($chan_id[..]));
+				if let Some(short_id) = $chan.get_short_channel_id() {
+					$short_to_id.remove(&short_id);
 				}
 				// TODO: $failed_fails is dropped here, which will cause other channels to hit the
 				// chain in a confused state! We need to move them into the ChannelMonitor which
@@ -835,12 +844,12 @@ macro_rules! handle_monitor_err {
 				// splitting hairs we'd prefer to claim payments that were to us, but we haven't
 				// given up the preimage yet, so might as well just wait until the payment is
 				// retried, avoiding the on-chain fees.
-				let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()));
-				res
+				let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id, $chan.force_shutdown(true), $self.get_channel_update(&$chan).ok()));
+				(res, true)
 			},
 			ChannelMonitorUpdateErr::TemporaryFailure => {
 				log_info!($self.logger, "Disabling channel {} due to monitor update TemporaryFailure. On restore will send {} and process {} forwards and {} fails",
-					log_bytes!($entry.key()[..]),
+					log_bytes!($chan_id[..]),
 					if $resend_commitment && $resend_raa {
 						match $action_type {
 							RAACommitmentOrder::CommitmentFirst => { "commitment then RAA" },
@@ -857,11 +866,18 @@ macro_rules! handle_monitor_err {
 				if !$resend_raa {
 					debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
 				}
-				$entry.get_mut().monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
-				Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$entry.key()))
+				$chan.monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
+				(Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$chan_id)), false)
 			},
 		}
-	}
+	};
+	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => { {
+		let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_id, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails, $entry.key());
+		if drop {
+			$entry.remove_entry();
+		}
+		res
+	} };
 }

 macro_rules! return_monitor_err {
@@ -1845,6 +1861,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 	/// Note that this includes RBF or similar transaction replacement strategies - lightning does
 	/// not currently support replacing a funding transaction on an existing channel. Instead,
 	/// create a new channel with a conflicting funding transaction.
+	///
+	/// [`Event::FundingGenerationReady`]: crate::util::events::Event::FundingGenerationReady
 	pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_transaction: Transaction) -> Result<(), APIError> {
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);

@@ -1908,7 +1926,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 	// be absurd. We ensure this by checking that at least 500 (our stated public contract on when
 	// broadcast_node_announcement panics) of the maximum-length addresses would fit in a 64KB
 	// message...
-	const HALF_MESSAGE_IS_ADDRS: u32 = ::std::u16::MAX as u32 / (NetAddress::MAX_LEN as u32 + 1) / 2;
+	const HALF_MESSAGE_IS_ADDRS: u32 = ::core::u16::MAX as u32 / (NetAddress::MAX_LEN as u32 + 1) / 2;
 	#[deny(const_err)]
 	#[allow(dead_code)]
 	// ...by failing to compile if the number of addresses that would be half of a message is
@@ -2054,7 +2072,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 					},
 					HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
 						log_trace!(self.logger, "Failing HTLC back to channel with short id {} after delay", short_chan_id);
-						match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet) {
+						match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet, &self.logger) {
 							Err(e) => {
 								if let ChannelError::Ignore(msg) = e {
 									log_trace!(self.logger, "Failed to fail backwards to short_id {}: {}", short_chan_id, msg);
@@ -3348,7 +3366,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 	}

 	fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
-		let chan_restoration_res = {
+		let (htlcs_failed_forward, chan_restoration_res) = {
 			let mut channel_state_lock = self.channel_state.lock().unwrap();
 			let channel_state = &mut *channel_state_lock;

@@ -3361,7 +3379,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 					// disconnect, so Channel's reestablish will never hand us any holding cell
 					// freed HTLCs to fail backwards. If in the future we no longer drop pending
 					// add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
-				let (funding_locked, revoke_and_ack, commitment_update, monitor_update_opt, order, shutdown) =
+				let (funding_locked, revoke_and_ack, commitment_update, monitor_update_opt, order, htlcs_failed_forward, shutdown) =
 					try_chan_entry!(self, chan.get_mut().channel_reestablish(msg, &self.logger), channel_state, chan);
 				if let Some(msg) = shutdown {
 					channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
@@ -3369,12 +3387,13 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 						msg,
 					});
 				}
-				handle_chan_restoration_locked!(self, channel_state_lock, channel_state, chan, revoke_and_ack, commitment_update, order, monitor_update_opt, Vec::new(), None, funding_locked)
+				(htlcs_failed_forward, handle_chan_restoration_locked!(self, channel_state_lock, channel_state, chan, revoke_and_ack, commitment_update, order, monitor_update_opt, Vec::new(), None, funding_locked))
 			},
 			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
 		}
 	};
 	post_handle_chan_restoration!(self, chan_restoration_res);
+	self.fail_holding_cell_htlcs(htlcs_failed_forward, msg.channel_id);
 	Ok(())
 }

@@ -3433,52 +3452,105 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 		}
 	}

-	/// Process pending events from the `chain::Watch`.
-	fn process_pending_monitor_events(&self) {
+	/// Process pending events from the `chain::Watch`, returning whether any events were processed.
+	fn process_pending_monitor_events(&self) -> bool {
 		let mut failed_channels = Vec::new();
-		{
-			for monitor_event in self.chain_monitor.release_pending_monitor_events() {
-				match monitor_event {
-					MonitorEvent::HTLCEvent(htlc_update) => {
-						if let Some(preimage) = htlc_update.payment_preimage {
-							log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
-							self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
-						} else {
-							log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
-							self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+		let pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
+		let has_pending_monitor_events = !pending_monitor_events.is_empty();
+		for monitor_event in pending_monitor_events {
+			match monitor_event {
+				MonitorEvent::HTLCEvent(htlc_update) => {
+					if let Some(preimage) = htlc_update.payment_preimage {
+						log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
+						self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
+					} else {
+						log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
+						self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+					}
+				},
+				MonitorEvent::CommitmentTxBroadcasted(funding_outpoint) => {
+					let mut channel_lock = self.channel_state.lock().unwrap();
+					let channel_state = &mut *channel_lock;
+					let by_id = &mut channel_state.by_id;
+					let short_to_id = &mut channel_state.short_to_id;
+					let pending_msg_events = &mut channel_state.pending_msg_events;
+					if let Some(mut chan) = by_id.remove(&funding_outpoint.to_channel_id()) {
+						if let Some(short_id) = chan.get_short_channel_id() {
+							short_to_id.remove(&short_id);
 						}
-					},
-					MonitorEvent::CommitmentTxBroadcasted(funding_outpoint) => {
-						let mut channel_lock = self.channel_state.lock().unwrap();
-						let channel_state = &mut *channel_lock;
-						let by_id = &mut channel_state.by_id;
-						let short_to_id = &mut channel_state.short_to_id;
-						let pending_msg_events = &mut channel_state.pending_msg_events;
-						if let Some(mut chan) = by_id.remove(&funding_outpoint.to_channel_id()) {
-							if let Some(short_id) = chan.get_short_channel_id() {
-								short_to_id.remove(&short_id);
-							}
-							failed_channels.push(chan.force_shutdown(false));
-							if let Ok(update) = self.get_channel_update(&chan) {
-								pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-									msg: update
-								});
-							}
-							pending_msg_events.push(events::MessageSendEvent::HandleError {
-								node_id: chan.get_counterparty_node_id(),
-								action: msgs::ErrorAction::SendErrorMessage {
-									msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
-								},
+						failed_channels.push(chan.force_shutdown(false));
+						if let Ok(update) = self.get_channel_update(&chan) {
+							pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+								msg: update
 							});
 						}
-					},
-				}
+						pending_msg_events.push(events::MessageSendEvent::HandleError {
+							node_id: chan.get_counterparty_node_id(),
+							action: msgs::ErrorAction::SendErrorMessage {
+								msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
+							},
+						});
+					}
+				},
 			}
 		}
 		for failure in failed_channels.drain(..) {
 			self.finish_force_close_channel(failure);
 		}
+
+		has_pending_monitor_events
+	}
+
+	/// Check the holding cell in each channel and free any pending HTLCs in them if possible.
+	/// This should only apply to HTLCs which were added to the holding cell because we were
+	/// waiting on a monitor update to finish. In that case, we don't want to free the holding cell
+	/// directly in `channel_monitor_updated` as it may introduce deadlocks calling back into user
+	/// code to inform them of a channel monitor update.
+	fn check_free_holding_cells(&self) {
+		let mut failed_htlcs = Vec::new();
+		let mut handle_errors = Vec::new();
+		{
+			let mut channel_state_lock = self.channel_state.lock().unwrap();
+			let channel_state = &mut *channel_state_lock;
+			let by_id = &mut channel_state.by_id;
+			let short_to_id = &mut channel_state.short_to_id;
+			let pending_msg_events = &mut channel_state.pending_msg_events;
+
+			by_id.retain(|channel_id, chan| {
+				match chan.maybe_free_holding_cell_htlcs(&self.logger) {
+					Ok((None, ref htlcs)) if htlcs.is_empty() => true,
+					Ok((commitment_opt, holding_cell_failed_htlcs)) => {
+						failed_htlcs.push((holding_cell_failed_htlcs, *channel_id));
+						if let Some((commitment_update, monitor_update)) = commitment_opt {
+							if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
+								let (res, close_channel) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, false, true, Vec::new(), Vec::new(), channel_id);
+								handle_errors.push((chan.get_counterparty_node_id(), res));
+								if close_channel { return false; }
+							} else {
+								pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+									node_id: chan.get_counterparty_node_id(),
+									updates: commitment_update,
+								});
+							}
+						}
+						true
+					},
+					Err(e) => {
+						let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id);
+						handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
+						!close_channel
+					}
+				}
+			});
+		}
+		for (failures, channel_id) in failed_htlcs.drain(..) {
+			self.fail_holding_cell_htlcs(failures, channel_id);
+		}
+
+		for (counterparty_node_id, err) in handle_errors.drain(..) {
+			let _ = handle_error!(self, err, counterparty_node_id);
+		}
 	}

 	/// Handle a list of channel failures during a block_connected or block_disconnected call,
@@ -3603,6 +3675,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 	pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32, user_payment_id: u64) -> Result<PaymentSecret, APIError> {
 		self.set_payment_hash_secret_map(payment_hash, None, min_value_msat, invoice_expiry_delta_secs, user_payment_id)
 	}
+
+	#[cfg(any(test, feature = "fuzztarget", feature = "_test_utils"))]
+	pub fn get_and_clear_pending_events(&self) -> Vec<Event> {
+		let events = std::cell::RefCell::new(Vec::new());
+		let event_handler = |event| events.borrow_mut().push(event);
+		self.process_pending_events(&event_handler);
+		events.into_inner()
+	}
 }

 impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<Signer, M, T, K, F, L>
@@ -3613,33 +3693,69 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSend
 	L::Target: Logger,
 {
 	fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
-		//TODO: This behavior should be documented. It's non-intuitive that we query
-		// ChannelMonitors when clearing other events.
-		self.process_pending_monitor_events();
+		let events = RefCell::new(Vec::new());
+		PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+			let mut result = NotifyOption::SkipPersist;

-		let mut ret = Vec::new();
-		let mut channel_state = self.channel_state.lock().unwrap();
-		mem::swap(&mut ret, &mut channel_state.pending_msg_events);
-		ret
+			// TODO: This behavior should be documented. It's unintuitive that we query
+			// ChannelMonitors when clearing other events.
+			if self.process_pending_monitor_events() {
+				result = NotifyOption::DoPersist;
+			}
+
+			self.check_free_holding_cells();
+
+			let mut pending_events = Vec::new();
+			let mut channel_state = self.channel_state.lock().unwrap();
+			mem::swap(&mut pending_events, &mut channel_state.pending_msg_events);
+
+			if !pending_events.is_empty() {
+				events.replace(pending_events);
+			}
+
+			result
+		});
+		events.into_inner()
 	}
 }

 impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> EventsProvider for ChannelManager<Signer, M, T, K, F, L>
-	where M::Target: chain::Watch<Signer>,
-	      T::Target: BroadcasterInterface,
-	      K::Target: KeysInterface<Signer = Signer>,
-	      F::Target: FeeEstimator,
-	      L::Target: Logger,
+where
+	M::Target: chain::Watch<Signer>,
+	T::Target: BroadcasterInterface,
+	K::Target: KeysInterface<Signer = Signer>,
+	F::Target: FeeEstimator,
+	L::Target: Logger,
 {
-	fn get_and_clear_pending_events(&self) -> Vec<Event> {
-		//TODO: This behavior should be documented. It's non-intuitive that we query
-		// ChannelMonitors when clearing other events.
-		self.process_pending_monitor_events();
+	/// Processes events that must be periodically handled.
+	///
+	/// An [`EventHandler`] may safely call back to the provider in order to handle an event.
+	/// However, it must not call [`Writeable::write`] as doing so would result in a deadlock.
+	///
+	/// Pending events are persisted as part of [`ChannelManager`]. While these events are cleared
+	/// when processed, an [`EventHandler`] must be able to handle previously seen events when
+	/// restarting from an old state.
+	fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
+		PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+			let mut result = NotifyOption::SkipPersist;

-		let mut ret = Vec::new();
-		let mut pending_events = self.pending_events.lock().unwrap();
-		mem::swap(&mut ret, &mut *pending_events);
-		ret
+			// TODO: This behavior should be documented. It's unintuitive that we query
+			// ChannelMonitors when clearing other events.
+			if self.process_pending_monitor_events() {
+				result = NotifyOption::DoPersist;
+			}
+
+			let mut pending_events = std::mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]);
+			if !pending_events.is_empty() {
+				result = NotifyOption::DoPersist;
+			}
+
+			for event in pending_events.drain(..) {
+				handler.handle_event(event);
+			}
+
+			result
+		});
 	}
 }

@@ -3984,7 +4100,6 @@ impl
 	fn peer_disconnected(&self, counterparty_node_id: &PublicKey, no_connection_possible: bool) {
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 		let mut failed_channels = Vec::new();
-		let mut failed_payments = Vec::new();
 		let mut no_channels_remain = true;
 		{
 			let mut channel_state_lock = self.channel_state.lock().unwrap();
@@ -4013,15 +4128,7 @@ impl
 			log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates", log_pubkey!(counterparty_node_id));
 			channel_state.by_id.retain(|_, chan| {
 				if chan.get_counterparty_node_id() == *counterparty_node_id {
-					// Note that currently on channel reestablish we assert that there are no
-					// holding cell add-HTLCs, so if in the future we stop removing uncommitted HTLCs
-					// on peer disconnect here, there will need to be corresponding changes in
-					// reestablish logic.
-					let failed_adds = chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
-					if !failed_adds.is_empty() {
-						let chan_update = self.get_channel_update(&chan).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
-						failed_payments.push((chan_update, failed_adds));
-					}
+					chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
 					if chan.is_shutdown() {
 						if let Some(short_id) = chan.get_short_channel_id() {
 							short_to_id.remove(&short_id);
@@ -4065,11 +4172,6 @@ impl
 		for failure in failed_channels.drain(..) {
 			self.finish_force_close_channel(failure);
 		}
-		for (chan_update, mut htlc_sources) in failed_payments {
-			for (htlc_source, payment_hash) in htlc_sources.drain(..) {
-				self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x1000 | 7, data: chan_update.clone() });
-			}
-		}
 	}

 	fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init) {
@@ -4840,9 +4942,9 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 mod tests {
 	use ln::channelmanager::PersistenceNotifier;
 	use std::sync::Arc;
-	use std::sync::atomic::{AtomicBool, Ordering};
+	use core::sync::atomic::{AtomicBool, Ordering};
 	use std::thread;
-	use std::time::Duration;
+	use core::time::Duration;

 	#[test]
 	fn test_wait_timeout() {
@@ -4901,7 +5003,7 @@ pub mod bench {
 	use routing::router::get_route;
 	use util::test_utils;
 	use util::config::UserConfig;
-	use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
+	use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};

 	use bitcoin::hashes::Hash;
 	use bitcoin::hashes::sha256::Hash as Sha256;
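
With this change, `EventsProvider` is handler-driven rather than polled: as the test-only `get_and_clear_pending_events` helper above demonstrates, a closure matching `Fn(Event)` can serve as the `EventHandler`. A minimal caller-side sketch under that assumption -- the `drain_events` wrapper, the `channel_manager` argument, and the specific match arms are illustrative, not part of this diff:

	// Sketch: drive pending events through a closure-based EventHandler.
	use util::events::{Event, EventsProvider};

	fn drain_events<CM: EventsProvider>(channel_manager: &CM) {
		let event_handler = |event: Event| match event {
			Event::FundingGenerationReady { .. } => {
				// Build the funding transaction, then hand it back via
				// ChannelManager::funding_transaction_generated().
			},
			_ => { /* persist or dispatch other events */ },
		};
		// A shared reference satisfies the `H: Deref` bound, mirroring the
		// test helper's `self.process_pending_events(&event_handler)` above.
		channel_manager.process_pending_events(&event_handler);
	}

Note that, per the doc comment added above, the handler may call back into the provider but must not call `Writeable::write`, and it must tolerate redelivery of previously seen events after a restart from an old state.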