diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index b29199e8..1f3ad554 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -36,9 +36,9 @@ use bitcoin::secp256k1::ecdh::SharedSecret;
 use bitcoin::secp256k1;
 
 use chain;
-use chain::{Confirm, Watch, BestBlock};
+use chain::{Confirm, ChannelMonitorUpdateErr, Watch, BestBlock};
 use chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
-use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, ChannelMonitorUpdateErr, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
+use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
 use chain::transaction::{OutPoint, TransactionData};
 // Since this struct is returned in `list_channels` methods, expose it here in case users want to
 // construct one themselves.
@@ -402,7 +402,7 @@ struct PendingInboundPayment {
 
 /// Stores the session_priv for each part of a payment that is still pending. For versions 0.0.102
 /// and later, also stores information for retrying the payment.
-enum PendingOutboundPayment {
+pub(crate) enum PendingOutboundPayment {
 	Legacy {
 		session_privs: HashSet<[u8; 32]>,
 	},
@@ -1384,6 +1384,18 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
 		self.list_channels_with_filter(|&(_, ref channel)| channel.is_live())
 	}
 
+	/// Helper function that issues the channel close events
+	fn issue_channel_close_events(&self, channel: &Channel<Signer>, closure_reason: ClosureReason) {
+		let mut pending_events_lock = self.pending_events.lock().unwrap();
+		match channel.unbroadcasted_funding() {
+			Some(transaction) => {
+				pending_events_lock.push(events::Event::DiscardFunding { channel_id: channel.channel_id(), transaction })
+			},
+			None => {},
+		}
+		pending_events_lock.push(events::Event::ChannelClosed { channel_id: channel.channel_id(), reason: closure_reason });
+	}
+
 	fn close_channel_internal(&self, channel_id: &[u8; 32], target_feerate_sats_per_1000_weight: Option<u32>) -> Result<(), APIError> {
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
@@ -1430,12 +1442,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
 						msg: channel_update
 					});
 				}
-				if let Ok(mut pending_events_lock) = self.pending_events.lock() {
-					pending_events_lock.push(events::Event::ChannelClosed {
-						channel_id: *channel_id,
-						reason: ClosureReason::HolderForceClosed
-					});
-				}
+				self.issue_channel_close_events(&channel, ClosureReason::HolderForceClosed);
 			}
 			break Ok(());
 		},
@@ -1526,13 +1533,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
 			if let Some(short_id) = chan.get().get_short_channel_id() {
 				channel_state.short_to_id.remove(&short_id);
 			}
-			let mut pending_events_lock = self.pending_events.lock().unwrap();
 			if peer_node_id.is_some() {
 				if let Some(peer_msg) = peer_msg {
-					pending_events_lock.push(events::Event::ChannelClosed { channel_id: *channel_id, reason: ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() } });
+					self.issue_channel_close_events(chan.get(), ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() });
				}
 			} else {
-				pending_events_lock.push(events::Event::ChannelClosed { channel_id: *channel_id, reason: ClosureReason::HolderForceClosed });
+				self.issue_channel_close_events(chan.get(), ClosureReason::HolderForceClosed);
 			}
 			chan.remove_entry().1
 		} else {
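
With `issue_channel_close_events` above, every close path now emits an `Event::ChannelClosed`, plus an `Event::DiscardFunding` when a funding transaction exists but was never broadcast. A minimal consumer-side sketch (the import path and any event fields beyond those shown in this diff may differ across rust-lightning versions):

```rust
use lightning::util::events::Event;

// Sketch: handle the two events issue_channel_close_events can emit.
fn handle_close_event(event: &Event) {
	match event {
		Event::ChannelClosed { channel_id, reason } => {
			// `reason` is the ClosureReason this diff threads through:
			// HolderForceClosed, CounterpartyForceClosed, CooperativeClosure,
			// CommitmentTxConfirmed, DisconnectedPeer or ProcessingError.
			println!("channel {:?} closed: {:?}", channel_id, reason);
		},
		Event::DiscardFunding { channel_id, transaction } => {
			// The funding tx was never broadcast; wallet code must not
			// broadcast it or keep waiting for its confirmation.
			println!("discard funding tx {} for channel {:?}", transaction.txid(), channel_id);
		},
		_ => {},
	}
}
```
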
@@ -1951,16 +1957,6 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
 		let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash);
 
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-		let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap();
-		let payment = pending_outbounds.entry(payment_id).or_insert_with(|| PendingOutboundPayment::Retryable {
-			session_privs: HashSet::new(),
-			pending_amt_msat: 0,
-			payment_hash: *payment_hash,
-			payment_secret: *payment_secret,
-			starting_block_height: self.best_block.read().unwrap().height(),
-			total_msat: total_value,
-		});
-		assert!(payment.insert(session_priv_bytes, path.last().unwrap().fee_msat));
 
 		let err: Result<(), _> = loop {
 			let mut channel_lock = self.channel_state.lock().unwrap();
@@ -1978,12 +1974,27 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
 				if !chan.get().is_live() {
 					return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!".to_owned()});
 				}
-				break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
-					path: path.clone(),
-					session_priv: session_priv.clone(),
-					first_hop_htlc_msat: htlc_msat,
-					payment_id,
-				}, onion_packet, &self.logger), channel_state, chan)
+				let send_res = break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(
+					htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
+						path: path.clone(),
+						session_priv: session_priv.clone(),
+						first_hop_htlc_msat: htlc_msat,
+						payment_id,
+					}, onion_packet, &self.logger),
+					channel_state, chan);
+
+				let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap();
+				let payment = pending_outbounds.entry(payment_id).or_insert_with(|| PendingOutboundPayment::Retryable {
+					session_privs: HashSet::new(),
+					pending_amt_msat: 0,
+					payment_hash: *payment_hash,
+					payment_secret: *payment_secret,
+					starting_block_height: self.best_block.read().unwrap().height(),
+					total_msat: total_value,
+				});
+				assert!(payment.insert(session_priv_bytes, path.last().unwrap().fee_msat));
+
+				send_res
 			} {
 				Some((update_add, commitment_signed, monitor_update)) => {
 					if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
@@ -2173,7 +2184,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
 			}
 		} else {
 			return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
-				err: "Payment with ID {} not found".to_string()
+				err: format!("Payment with ID {} not found", log_bytes!(payment_id.0)),
 			}))
 		}
 	};
@@ -3005,6 +3016,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
 							network_update: None,
 							all_paths_failed: payment.get().remaining_parts() == 0,
 							path: path.clone(),
+							short_channel_id: None,
 							#[cfg(test)]
 							error_code: None,
 							#[cfg(test)]
@@ -3058,9 +3070,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
 				match &onion_error {
 					&HTLCFailReason::LightningError { ref err } => {
 						#[cfg(test)]
-						let (network_update, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
+						let (network_update, short_channel_id, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
 						#[cfg(not(test))]
-						let (network_update, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
+						let (network_update, short_channel_id, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
 						// TODO: If we decided to blame ourselves (or one of our channels) in
 						// process_onion_failure we should close that channel as it implies our
 						// next-hop is needlessly blaming us!
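
`process_onion_failure` now also reports the `short_channel_id` of the hop the failure was attributed to, and the hunks below thread it into the path-failure event. A hedged sketch of how a caller might use the hint on retry (`PaymentPathFailed` is this era's event name; `avoid_channels` is a hypothetical caller-side blacklist):

```rust
use std::collections::HashSet;

use lightning::util::events::Event;

// Sketch: remember the failing hop so the next pathfinding attempt avoids it.
fn note_path_failure(event: &Event, avoid_channels: &mut HashSet<u64>) {
	if let Event::PaymentPathFailed { short_channel_id: Some(scid), .. } = event {
		avoid_channels.insert(*scid);
	}
}
```
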
@@ -3071,6 +3083,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
 							network_update,
 							all_paths_failed,
 							path: path.clone(),
+							short_channel_id,
 							#[cfg(test)]
 							error_code: onion_error_code,
 							#[cfg(test)]
@@ -3098,6 +3111,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
 							network_update: None,
 							all_paths_failed,
 							path: path.clone(),
+							short_channel_id: Some(path.first().unwrap().short_channel_id),
 							#[cfg(test)]
 							error_code: Some(*failure_code),
 							#[cfg(test)]
@@ -3303,8 +3317,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
 					sessions.remove(&session_priv_bytes, path.last().unwrap().fee_msat)
 				} else { false };
 				if found_payment {
+					let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
 					self.pending_events.lock().unwrap().push(
-						events::Event::PaymentSent { payment_preimage }
+						events::Event::PaymentSent {
+							payment_preimage,
+							payment_hash: payment_hash
+						}
 					);
 				} else {
 					log_trace!(self.logger, "Received duplicative fulfill for HTLC with payment_preimage {}", log_bytes!(payment_preimage.0));
@@ -3367,27 +3385,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
 		self.our_network_pubkey.clone()
 	}
 
-	/// Restores a single, given channel to normal operation after a
-	/// ChannelMonitorUpdateErr::TemporaryFailure was returned from a channel monitor update
-	/// operation.
-	///
-	/// All ChannelMonitor updates up to and including highest_applied_update_id must have been
-	/// fully committed in every copy of the given channels' ChannelMonitors.
-	///
-	/// Note that there is no effect to calling with a highest_applied_update_id other than the
-	/// current latest ChannelMonitorUpdate and one call to this function after multiple
-	/// ChannelMonitorUpdateErr::TemporaryFailures is fine. The highest_applied_update_id field
-	/// exists largely only to prevent races between this and concurrent update_monitor calls.
-	///
-	/// Thus, the anticipated use is, at a high level:
-	/// 1) You register a chain::Watch with this ChannelManager,
-	/// 2) it stores each update to disk, and begins updating any remote (eg watchtower) copies of
-	///    said ChannelMonitors as it can, returning ChannelMonitorUpdateErr::TemporaryFailures
-	///    any time it cannot do so instantly,
-	/// 3) update(s) are applied to each remote copy of a ChannelMonitor,
-	/// 4) once all remote copies are updated, you call this function with the update_id that
-	///    completed, and once it is the latest the Channel will be re-enabled.
-	pub fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) {
+	fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) {
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
 		let chan_restoration_res;
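
`PaymentSent` now carries the `payment_hash` alongside the preimage; as the hunk above computes, the hash is simply the SHA-256 of the preimage, so consumers can cross-check the two. A small sketch (import paths follow this era of the crate layout):

```rust
use bitcoin::hashes::{sha256::Hash as Sha256, Hash};

use lightning::ln::{PaymentHash, PaymentPreimage};

// Sketch: re-derive the payment hash from the preimage and compare.
fn preimage_matches(preimage: &PaymentPreimage, hash: &PaymentHash) -> bool {
	PaymentHash(Sha256::hash(&preimage.0).into_inner()) == *hash
}
```
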
@@ -3696,7 +3694,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
 					msg: update
 				});
 			}
-			self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: msg.channel_id, reason: ClosureReason::CooperativeClosure });
+			self.issue_channel_close_events(&chan, ClosureReason::CooperativeClosure);
 		}
 		Ok(())
 	}
@@ -4092,7 +4090,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
 						self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
 					}
 				},
-				MonitorEvent::CommitmentTxConfirmed(funding_outpoint) => {
+				MonitorEvent::CommitmentTxConfirmed(funding_outpoint) |
+				MonitorEvent::UpdateFailed(funding_outpoint) => {
 					let mut channel_lock = self.channel_state.lock().unwrap();
 					let channel_state = &mut *channel_lock;
 					let by_id = &mut channel_state.by_id;
@@ -4108,7 +4107,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
 								msg: update
 							});
 						}
-						self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), reason: ClosureReason::CommitmentTxConfirmed });
+						let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event {
+							ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() }
+						} else {
+							ClosureReason::CommitmentTxConfirmed
+						};
+						self.issue_channel_close_events(&chan, reason);
 						pending_msg_events.push(events::MessageSendEvent::HandleError {
 							node_id: chan.get_counterparty_node_id(),
 							action: msgs::ErrorAction::SendErrorMessage {
@@ -4117,6 +4121,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
 						});
 					}
 				},
+				MonitorEvent::UpdateCompleted { funding_txo, monitor_update_id } => {
+					self.channel_monitor_updated(&funding_txo, monitor_update_id);
+				},
 			}
 		}
 
@@ -4127,6 +4134,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
 		has_pending_monitor_events
 	}
 
+	/// In chanmon_consistency_target, we'd like to be able to restore monitor updating without
+	/// handling all pending events (i.e. not PendingHTLCsForwardable). Thus, we expose monitor
+	/// update events as a separate process method here.
+	#[cfg(feature = "fuzztarget")]
+	pub fn process_monitor_events(&self) {
+		self.process_pending_monitor_events();
+	}
+
 	/// Check the holding cell in each channel and free any pending HTLCs in them if possible.
 	/// Returns whether there were any updates such as if pending HTLCs were freed or a monitor
 	/// update was applied.
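
With `UpdateFailed` and `UpdateCompleted` surfaced as `MonitorEvent`s, the two `ChannelMonitorUpdateErr` variants (whose import moved up to `chain` at the top of this diff) map directly onto the flows handled above. A condensed sketch of that mapping, for orientation only:

```rust
use lightning::chain::ChannelMonitorUpdateErr;

// Sketch: how a monitor-persistence error corresponds to the MonitorEvents
// the ChannelManager now processes.
fn describe(err: ChannelMonitorUpdateErr) -> &'static str {
	match err {
		// Channel operation pauses; once the update is durably persisted the
		// watcher surfaces MonitorEvent::UpdateCompleted, which the code above
		// routes into channel_monitor_updated to re-enable the channel.
		ChannelMonitorUpdateErr::TemporaryFailure => "paused until UpdateCompleted",
		// Unrecoverable; the watcher surfaces MonitorEvent::UpdateFailed and the
		// code above force-closes with ClosureReason::ProcessingError.
		ChannelMonitorUpdateErr::PermanentFailure => "force-closed via UpdateFailed",
	}
}
```
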
@@ -4224,12 +4239,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
 					});
 				}
 
-				if let Ok(mut pending_events_lock) = self.pending_events.lock() {
-					pending_events_lock.push(events::Event::ChannelClosed {
-						channel_id: *channel_id,
-						reason: ClosureReason::CooperativeClosure
-					});
-				}
+				self.issue_channel_close_events(chan, ClosureReason::CooperativeClosure);
 
 				log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
 				self.tx_broadcaster.broadcast_transaction(&tx);
@@ -4383,6 +4393,11 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
 		self.process_pending_events(&event_handler);
 		events.into_inner()
 	}
+
+	#[cfg(test)]
+	pub fn has_pending_payments(&self) -> bool {
+		!self.pending_outbound_payments.lock().unwrap().is_empty()
+	}
 }
 
 impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<Signer, M, T, K, F, L>
@@ -4661,7 +4676,7 @@ where
 					msg: update
 				});
 			}
-			self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: channel.channel_id(), reason: ClosureReason::CommitmentTxConfirmed });
+			self.issue_channel_close_events(channel, ClosureReason::CommitmentTxConfirmed);
 			pending_msg_events.push(events::MessageSendEvent::HandleError {
 				node_id: channel.get_counterparty_node_id(),
 				action: msgs::ErrorAction::SendErrorMessage { msg: e },
 			});
@@ -4852,7 +4867,7 @@ impl<Signer: Sign, M: Deref + Sync + Send, T: Deref + Sync + Send, K: Deref + Sync + Send, F: Deref + Sync + Send, L: Deref + Sync + Send> ChannelMessageHandler for ChannelManager<Signer, M, T, K, F, L>
 					msg: update
 				});
 			}
-			self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), reason: ClosureReason::DisconnectedPeer });
+			self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
 			false
 		} else {
 			true
@@ -4867,7 +4882,7 @@ impl<Signer: Sign, M: Deref + Sync + Send, T: Deref + Sync + Send, K: Deref + Sync + Send, F: Deref + Sync + Send, L: Deref + Sync + Send> ChannelMessageHandler for ChannelManager<Signer, M, T, K, F, L>
 				if let Some(short_id) = chan.get_short_channel_id() {
 					short_to_id.remove(&short_id);
 				}
-				self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), reason: ClosureReason::DisconnectedPeer });
+				self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
 				return false;
 			} else {
 				no_channels_remain = false;
@@ -5431,20 +5446,25 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable for ChannelManager<Signer, M, T, K, F, L>
 ///
 /// At a high-level, the process for deserializing a ChannelManager and resuming normal operation
 /// is:
-/// 1) Deserialize all stored ChannelMonitors.
-/// 2) Deserialize the ChannelManager by filling in this struct and calling:
-///    <(BlockHash, ChannelManager)>::read(reader, args)
-///    This may result in closing some Channels if the ChannelMonitor is newer than the stored
-///    ChannelManager state to ensure no loss of funds. Thus, transactions may be broadcasted.
-/// 3) If you are not fetching full blocks, register all relevant ChannelMonitor outpoints the same
-///    way you would handle a `chain::Filter` call using ChannelMonitor::get_outputs_to_watch() and
-///    ChannelMonitor::get_funding_txo().
-/// 4) Reconnect blocks on your ChannelMonitors.
-/// 5) Disconnect/connect blocks on the ChannelManager.
-/// 6) Move the ChannelMonitors into your local chain::Watch.
+/// 1) Deserialize all stored [`ChannelMonitor`]s.
+/// 2) Deserialize the [`ChannelManager`] by filling in this struct and calling:
+///    `<(BlockHash, ChannelManager)>::read(reader, args)`
+///    This may result in closing some channels if the [`ChannelMonitor`] is newer than the stored
+///    [`ChannelManager`] state to ensure no loss of funds. Thus, transactions may be broadcasted.
+/// 3) If you are not fetching full blocks, register all relevant [`ChannelMonitor`] outpoints the
+///    same way you would handle a [`chain::Filter`] call using
+///    [`ChannelMonitor::get_outputs_to_watch`] and [`ChannelMonitor::get_funding_txo`].
+/// 4) Reconnect blocks on your [`ChannelMonitor`]s.
+/// 5) Disconnect/connect blocks on the [`ChannelManager`].
+/// 6) Re-persist the [`ChannelMonitor`]s to ensure the latest state is on disk.
+///    Note that if you're using a [`ChainMonitor`] for your [`chain::Watch`] implementation, you
+///    will likely accomplish this as a side-effect of calling [`chain::Watch::watch_channel`] in
+///    the next step.
+/// 7) Move the [`ChannelMonitor`]s into your local [`chain::Watch`]. If you're using a
+///    [`ChainMonitor`], this is done by calling [`chain::Watch::watch_channel`].
 ///
-/// Note that the ordering of #4-6 is not of importance, however all three must occur before you
-/// call any other methods on the newly-deserialized ChannelManager.
+/// Note that the ordering of #4-7 is not of importance, however all four must occur before you
+/// call any other methods on the newly-deserialized [`ChannelManager`].
 ///
 /// Note that because some channels may be closed during deserialization, it is critical that you
 /// always deserialize only the latest version of a ChannelManager and ChannelMonitors available to
@@ -5452,6 +5472,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable for ChannelManager<Signer, M, T, K, F, L>
 /// broadcast), and then later deserialize a newer version of the same ChannelManager (which will
 /// not force-close the same channels but consider them live), you may end up revoking a state for
 /// which you've already broadcasted the transaction.
+///
+/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
 pub struct ChannelManagerReadArgs<'a, Signer: 'a + Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 	where M::Target: chain::Watch<Signer>,
 	T::Target: BroadcasterInterface,
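
A compressed sketch of steps 1-7 above for a [`ChainMonitor`]-based node. The `read_monitors_from_disk`, `open_manager_bytes` and `replay_chain_on` helpers are placeholders for application code; `ChannelManagerReadArgs::new` and the `ReadableArgs`-style read call match this era's API:

```rust
// Hypothetical restart flow following steps 1-7 above.
let mut monitors = read_monitors_from_disk();                       // step 1
let monitor_refs = monitors.iter_mut().collect::<Vec<_>>();
let args = ChannelManagerReadArgs::new(keys_manager, fee_estimator, chain_monitor,
	tx_broadcaster, logger, user_config, monitor_refs);
let mut reader = open_manager_bytes();
let (block_hash, manager) =                                         // step 2
	<(BlockHash, ChannelManager<_, _, _, _, _, _>)>::read(&mut reader, args).unwrap();
replay_chain_on(&mut monitors, &manager, block_hash);               // steps 3-5
for monitor in monitors {                                           // steps 6-7
	// watch_channel persists the latest monitor state and hands the monitor
	// to the ChainMonitor, which begins watching the chain for it.
	let funding_txo = monitor.get_funding_txo().0;
	chain_monitor.watch_channel(funding_txo, monitor).unwrap();
}
```
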
@@ -5925,12 +5947,9 @@ mod tests {
 		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 		create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
-		let logger = test_utils::TestLogger::new();
 
 		// First, send a partial MPP payment.
-		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
-		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
-		let (payment_preimage, our_payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[1]);
+		let (route, our_payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100_000);
 		let payment_id = PaymentId([42; 32]);
 		// Use the utility function send_payment_along_path to send the payment with MPP data which
 		// indicates there are more HTLCs coming.
@@ -6007,8 +6026,9 @@ mod tests {
 		// further events will be generated for subsequence path successes.
 		let events = nodes[0].node.get_and_clear_pending_events();
 		match events[0] {
-			Event::PaymentSent { payment_preimage: ref preimage } => {
+			Event::PaymentSent { payment_preimage: ref preimage, payment_hash: ref hash } => {
 				assert_eq!(payment_preimage, *preimage);
+				assert_eq!(our_payment_hash, *hash);
 			},
 			_ => panic!("Unexpected event"),
 		}
@@ -6182,12 +6202,9 @@ mod tests {
 		let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
 		let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
 		let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
-		let logger = test_utils::TestLogger::new();
 
 		// Marshall an MPP route.
-		let (_, payment_hash, _) = get_payment_preimage_hash!(&nodes[3]);
-		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
-		let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[3].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
+		let (mut route, payment_hash, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
 		let path = route.paths[0].clone();
 		route.paths.push(path);
 		route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
@@ -6208,8 +6225,7 @@
 #[cfg(all(any(test, feature = "_test_utils"), feature = "unstable"))]
 pub mod bench {
 	use chain::Listen;
-	use chain::chainmonitor::ChainMonitor;
-	use chain::channelmonitor::Persist;
+	use chain::chainmonitor::{ChainMonitor, Persist};
 	use chain::keysinterface::{KeysManager, InMemorySigner};
 	use ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage};
 	use ln::features::{InitFeatures, InvoiceFeatures};
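
One downstream note on the final hunk: `Persist` now lives in `chain::chainmonitor` rather than `chain::channelmonitor`, so external implementors need a one-line import fix (paths as this diff establishes them):

```rust
// Before this change:
// use lightning::chain::channelmonitor::Persist;
// After:
use lightning::chain::chainmonitor::{ChainMonitor, Persist};
```
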