X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=98714252f5e27efd1c0dac2239ee819eedbd36f2;hb=0a2a40c4fd05f39b9485cc0f000f7066636783cb;hp=a28d6347332694af4dab7fc0737dd13a2a38c834;hpb=23240125b96647dc36f234f6b53acc62059ee113;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index a28d6347..98714252 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -28,7 +28,7 @@
 use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hashes::sha256d::Hash as Sha256dHash;
 use bitcoin::hash_types::{BlockHash, Txid};
-use bitcoin::secp256k1::key::{SecretKey,PublicKey};
+use bitcoin::secp256k1::{SecretKey,PublicKey};
 use bitcoin::secp256k1::Secp256k1;
 use bitcoin::secp256k1::ecdh::SharedSecret;
 use bitcoin::secp256k1;
@@ -48,6 +48,7 @@ use ln::msgs;
 use ln::msgs::NetAddress;
 use ln::onion_utils;
 use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT, OptionalField};
+use ln::wire::Encode;
 use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner, Recipient};
 use util::config::UserConfig;
 use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
@@ -163,7 +164,7 @@ enum OnionPayload {
	Invoice {
		/// This is only here for backwards-compatibility in serialization, in the future it can be
		/// removed, breaking clients running 0.0.106 and earlier.
-		_legacy_hop_data: msgs::FinalOnionHopData,
+		_legacy_hop_data: Option<msgs::FinalOnionHopData>,
	},
	/// Contains the payer-provided preimage.
	Spontaneous(PaymentPreimage),
@@ -417,7 +418,7 @@ pub(super) struct ChannelHolder<Signer: Sign> {
	/// Note that while this is held in the same mutex as the channels themselves, no consistency
	/// guarantees are made about the channels given here actually existing anymore by the time you
	/// go to read them!
-	claimable_htlcs: HashMap<PaymentHash, Vec<ClaimableHTLC>>,
+	claimable_htlcs: HashMap<PaymentHash, (events::PaymentPurpose, Vec<ClaimableHTLC>)>,
	/// Messages to send to peers - pushed to in the same lock that they are generated in (except
	/// for broadcast messages, where ordering isn't as strict).
	pub(super) pending_msg_events: Vec<MessageSendEvent>,
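For context on the `claimable_htlcs` change above: the map now pairs each payment hash with the payment's `PaymentPurpose`, computed once when the first HTLC of the payment arrives and shared by all of its MPP parts. A standalone sketch of the shape (placeholder types, not LDK's real definitions):

    use std::collections::HashMap;

    #[derive(Clone)]
    enum PaymentPurpose { SpontaneousPayment([u8; 32]) } // stand-in for events::PaymentPurpose
    struct ClaimableHTLC; // stand-in for the real per-HTLC state

    fn main() {
        let mut claimable_htlcs: HashMap<[u8; 32], (PaymentPurpose, Vec<ClaimableHTLC>)> = HashMap::new();
        let payment_hash = [0u8; 32];
        // A second MPP part reuses the purpose computed for the first part:
        let (_purpose, htlcs) = claimable_htlcs.entry(payment_hash)
            .or_insert_with(|| (PaymentPurpose::SpontaneousPayment([1; 32]), Vec::new()));
        htlcs.push(ClaimableHTLC);
        assert_eq!(claimable_htlcs[&payment_hash].1.len(), 1);
    }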
@@ -1768,17 +1769,18 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
		});
	}

-	fn close_channel_internal(&self, channel_id: &[u8; 32], target_feerate_sats_per_1000_weight: Option<u32>) -> Result<(), APIError> {
+	fn close_channel_internal(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>) -> Result<(), APIError> {
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);

-		let counterparty_node_id;
		let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
		let result: Result<(), _> = loop {
			let mut channel_state_lock = self.channel_state.lock().unwrap();
			let channel_state = &mut *channel_state_lock;
			match channel_state.by_id.entry(channel_id.clone()) {
				hash_map::Entry::Occupied(mut chan_entry) => {
-					counterparty_node_id = chan_entry.get().get_counterparty_node_id();
+					if *counterparty_node_id != chan_entry.get().get_counterparty_node_id(){
+						return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() });
+					}
					let per_peer_state = self.per_peer_state.read().unwrap();
					let (shutdown_msg, monitor_update, htlcs) = match per_peer_state.get(&counterparty_node_id) {
						Some(peer_state) => {
@@ -1803,7 +1805,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
					}

					channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
-						node_id: counterparty_node_id,
+						node_id: *counterparty_node_id,
						msg: shutdown_msg
					});

@@ -1826,7 +1828,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
			self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
		}

-		let _ = handle_error!(self, result, counterparty_node_id);
+		let _ = handle_error!(self, result, *counterparty_node_id);
		Ok(())
	}

@@ -1847,8 +1849,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
	/// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
	/// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
	/// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
-	pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
-		self.close_channel_internal(channel_id, None)
+	pub fn close_channel(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey) -> Result<(), APIError> {
+		self.close_channel_internal(channel_id, counterparty_node_id, None)
	}

	/// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
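A minimal calling sketch for the new `close_channel` signature (assumes an initialized `channel_manager` and a `ChannelDetails` from `list_channels()`; everything here other than the method itself is setup this diff does not show):

    // Cooperative close now requires naming the counterparty as well as the channel:
    let channels = channel_manager.list_channels();
    if let Some(chan) = channels.first() {
        channel_manager
            .close_channel(&chan.channel_id, &chan.counterparty.node_id)
            .expect("unknown channel_id or mismatched counterparty_node_id");
    }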
@@ -1870,8 +1872,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
	/// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
	/// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
	/// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
-	pub fn close_channel_with_target_feerate(&self, channel_id: &[u8; 32], target_feerate_sats_per_1000_weight: u32) -> Result<(), APIError> {
-		self.close_channel_internal(channel_id, Some(target_feerate_sats_per_1000_weight))
+	pub fn close_channel_with_target_feerate(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: u32) -> Result<(), APIError> {
+		self.close_channel_internal(channel_id, counterparty_node_id, Some(target_feerate_sats_per_1000_weight))
	}

	#[inline]
@@ -1890,22 +1892,18 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
		}
	}

-	/// `peer_node_id` should be set when we receive a message from a peer, but not set when the
+	/// `peer_msg` should be set when we receive a message from a peer, but not set when the
	/// user closes, which will be re-exposed as the `ChannelClosed` reason.
-	fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>, peer_msg: Option<&String>) -> Result<PublicKey, APIError> {
+	fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: &PublicKey, peer_msg: Option<&String>) -> Result<PublicKey, APIError> {
		let mut chan = {
			let mut channel_state_lock = self.channel_state.lock().unwrap();
			let channel_state = &mut *channel_state_lock;
			if let hash_map::Entry::Occupied(chan) = channel_state.by_id.entry(channel_id.clone()) {
-				if let Some(node_id) = peer_node_id {
-					if chan.get().get_counterparty_node_id() != *node_id {
-						return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
-					}
+				if chan.get().get_counterparty_node_id() != *peer_node_id {
+					return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
				}
-				if peer_node_id.is_some() {
-					if let Some(peer_msg) = peer_msg {
-						self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() });
-					}
+				if let Some(peer_msg) = peer_msg {
+					self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() });
				} else {
					self.issue_channel_close_events(chan.get(),ClosureReason::HolderForceClosed);
				}
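Note the error choice above: a mismatched `peer_node_id` yields the same `ChannelUnavailable { err: "No such channel" }` as a genuinely unknown channel id, so a peer whose error messages land here (via the handlers further down in this diff) cannot probe whether a given channel id exists. A standalone illustration of the guard's intent, not LDK code:

    fn ensure_counterparty(actual: [u8; 33], claimed: [u8; 33]) -> Result<(), String> {
        if actual != claimed {
            // Deliberately indistinguishable from the unknown-channel error.
            return Err("No such channel".to_owned());
        }
        Ok(())
    }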
@@ -1927,10 +1925,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
	}

	/// Force closes a channel, immediately broadcasting the latest local commitment transaction to
-	/// the chain and rejecting new HTLCs on the given channel. Fails if channel_id is unknown to the manager.
-	pub fn force_close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
+	/// the chain and rejecting new HTLCs on the given channel. Fails if `channel_id` is unknown to
+	/// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding
+	/// channel.
+	pub fn force_close_channel(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey) -> Result<(), APIError> {
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-		match self.force_close_channel_with_peer(channel_id, None, None) {
+		match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None) {
			Ok(counterparty_node_id) => {
				self.channel_state.lock().unwrap().pending_msg_events.push(
					events::MessageSendEvent::HandleError {
@@ -1950,7 +1950,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
	/// for each to the chain and rejecting new HTLCs on each.
	pub fn force_close_all_channels(&self) {
		for chan in self.list_channels() {
-			let _ = self.force_close_channel(&chan.channel_id);
+			let _ = self.force_close_channel(&chan.channel_id, &chan.counterparty.node_id);
		}
	}

@@ -2070,11 +2070,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
			return_malformed_err!("invalid ephemeral pubkey", 0x8000 | 0x4000 | 6);
		}

-		let shared_secret = {
-			let mut arr = [0; 32];
-			arr.copy_from_slice(&SharedSecret::new(&msg.onion_routing_packet.public_key.unwrap(), &self.our_network_key)[..]);
-			arr
-		};
+		let shared_secret = SharedSecret::new(&msg.onion_routing_packet.public_key.unwrap(), &self.our_network_key).secret_bytes();

		if msg.onion_routing_packet.version != 0 {
			//TODO: Spec doesn't indicate if we should only hash hop_data here (and in other
@@ -2253,7 +2249,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
					break None;
				}
				{
-					let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 8 + 2));
+					let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 2 + 8 + 2));
					if let Some(chan_update) = chan_update {
						if code == 0x1000 | 11 || code == 0x1000 | 12 {
							msg.amount_msat.write(&mut res).expect("Writes cannot fail");
@@ -2265,7 +2261,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
							// TODO: underspecified, follow https://github.com/lightningnetwork/lightning-rfc/issues/791
							0u16.write(&mut res).expect("Writes cannot fail");
						}
-						(chan_update.serialized_length() as u16).write(&mut res).expect("Writes cannot fail");
+						(chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail");
+						msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail");
						chan_update.write(&mut res).expect("Writes cannot fail");
					}
					return_err!(err, code, &res.0[..]);
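The two hunks above change how `channel_update`s are embedded in onion failure messages: the update is now preceded by its wire message type (258 for `channel_update`, hence the `use ln::wire::Encode;` at the top of this diff), and the length prefix grows by two to cover those type bytes, giving the `len || type || channel_update` layout other implementations expect per BOLT 4. A standalone sketch of the resulting framing (big-endian, with a made-up update size):

    fn main() {
        let chan_update_bytes = vec![0u8; 134]; // pretend this is a serialized channel_update
        let mut res = Vec::with_capacity(2 + 2 + chan_update_bytes.len());
        // The length now counts the two type bytes that follow it...
        res.extend_from_slice(&((chan_update_bytes.len() as u16 + 2).to_be_bytes()));
        // ...then comes the message type (msgs::ChannelUpdate::TYPE == 258)...
        res.extend_from_slice(&258u16.to_be_bytes());
        // ...then the update itself.
        res.extend_from_slice(&chan_update_bytes);
        assert_eq!(res.len(), chan_update_bytes.len() + 4);
    }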
@@ -2324,7 +2321,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
		};

		let msg_hash = Sha256dHash::hash(&unsigned.encode()[..]);
-		let sig = self.secp_ctx.sign(&hash_to_message!(&msg_hash[..]), &self.our_network_key);
+		let sig = self.secp_ctx.sign_ecdsa(&hash_to_message!(&msg_hash[..]), &self.our_network_key);

		Ok(msgs::ChannelUpdate {
			signature: sig,
@@ -2689,8 +2686,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana

	/// Handles the generation of a funding transaction, optionally (for tests) with a function
	/// which checks the correctness of the funding transaction given the associated channel.
-	fn funding_transaction_generated_intern<FundingOutput: Fn(&Channel<Signer>, &Transaction) -> Result<OutPoint, APIError>>
-		(&self, temporary_channel_id: &[u8; 32], funding_transaction: Transaction, find_funding_output: FundingOutput) -> Result<(), APIError> {
+	fn funding_transaction_generated_intern<FundingOutput: Fn(&Channel<Signer>, &Transaction) -> Result<OutPoint, APIError>>(
+		&self, temporary_channel_id: &[u8; 32], _counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput
+	) -> Result<(), APIError> {
		let (chan, msg) = {
			let (res, chan) = match self.channel_state.lock().unwrap().by_id.remove(temporary_channel_id) {
				Some(mut chan) => {
@@ -2731,8 +2729,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
	}

	#[cfg(test)]
-	pub(crate) fn funding_transaction_generated_unchecked(&self, temporary_channel_id: &[u8; 32], funding_transaction: Transaction, output_index: u16) -> Result<(), APIError> {
-		self.funding_transaction_generated_intern(temporary_channel_id, funding_transaction, |_, tx| {
+	pub(crate) fn funding_transaction_generated_unchecked(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction, output_index: u16) -> Result<(), APIError> {
+		self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, |_, tx| {
			Ok(OutPoint { txid: tx.txid(), index: output_index })
		})
	}
@@ -2759,7 +2757,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
	///
	/// [`Event::FundingGenerationReady`]: crate::util::events::Event::FundingGenerationReady
	/// [`Event::ChannelClosed`]: crate::util::events::Event::ChannelClosed
-	pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_transaction: Transaction) -> Result<(), APIError> {
+	pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction) -> Result<(), APIError> {
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);

		for inp in funding_transaction.input.iter() {
@@ -2769,7 +2767,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
				});
			}
		}
-		self.funding_transaction_generated_intern(temporary_channel_id, funding_transaction, |chan, tx| {
+		self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, |chan, tx| {
			let mut output_index = None;
			let expected_spk = chan.get_funding_redeemscript().to_v0_p2wsh();
			for (idx, outp) in tx.output.iter().enumerate() {
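A sketch of the updated call site, as a fragment of an `Event::FundingGenerationReady` handler (the event now carries the counterparty's id, per a later hunk in this diff; `wallet_build_funding_tx` is a hypothetical stand-in for your on-chain wallet, and the surrounding match and setup are elided):

    Event::FundingGenerationReady { temporary_channel_id, counterparty_node_id, channel_value_satoshis, output_script, .. } => {
        // The tx must pay exactly channel_value_satoshis to output_script, with all inputs final.
        let funding_tx = wallet_build_funding_tx(channel_value_satoshis, output_script);
        channel_manager
            .funding_transaction_generated(&temporary_channel_id, &counterparty_node_id, funding_tx)
            .expect("unknown temporary_channel_id or non-final input");
    }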
@@ -2925,11 +2923,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
					if let PendingHTLCRouting::Forward { onion_packet, .. } = routing {
						let phantom_secret_res = self.keys_manager.get_node_secret(Recipient::PhantomNode);
						if phantom_secret_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id) {
-							let phantom_shared_secret = {
-								let mut arr = [0; 32];
-								arr.copy_from_slice(&SharedSecret::new(&onion_packet.public_key.unwrap(), &phantom_secret_res.unwrap())[..]);
-								arr
-							};
+							let phantom_shared_secret = SharedSecret::new(&onion_packet.public_key.unwrap(), &phantom_secret_res.unwrap()).secret_bytes();
							let next_hop = match onion_utils::decode_next_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) {
								Ok(res) => res,
								Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
@@ -3104,7 +3098,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
					prev_funding_outpoint } => {
						let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret) = match routing {
							PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry, phantom_shared_secret } => {
-								let _legacy_hop_data = payment_data.clone();
+								let _legacy_hop_data = Some(payment_data.clone());
								(incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data }, Some(payment_data), phantom_shared_secret)
							},
							PendingHTLCRouting::ReceiveKeysend { payment_preimage, incoming_cltv_expiry } =>
@@ -3149,8 +3143,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
						macro_rules! check_total_value {
							($payment_data: expr, $payment_preimage: expr) => {{
								let mut payment_received_generated = false;
-								let htlcs = channel_state.claimable_htlcs.entry(payment_hash)
-									.or_insert(Vec::new());
+								let purpose = || {
+									events::PaymentPurpose::InvoicePayment {
+										payment_preimage: $payment_preimage,
+										payment_secret: $payment_data.payment_secret,
+									}
+								};
+								let (_, htlcs) = channel_state.claimable_htlcs.entry(payment_hash)
+									.or_insert_with(|| (purpose(), Vec::new()));
								if htlcs.len() == 1 {
									if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
										log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0));
@@ -3181,10 +3181,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
									htlcs.push(claimable_htlc);
									new_events.push(events::Event::PaymentReceived {
										payment_hash,
-										purpose: events::PaymentPurpose::InvoicePayment {
-											payment_preimage: $payment_preimage,
-											payment_secret: $payment_data.payment_secret,
-										},
+										purpose: purpose(),
										amt: total_value,
									});
									payment_received_generated = true;
@@ -3222,11 +3219,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
							OnionPayload::Spontaneous(preimage) => {
								match channel_state.claimable_htlcs.entry(payment_hash) {
									hash_map::Entry::Vacant(e) => {
-										e.insert(vec![claimable_htlc]);
+										let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
+										e.insert((purpose.clone(), vec![claimable_htlc]));
										new_events.push(events::Event::PaymentReceived {
											payment_hash,
											amt: amt_to_forward,
-											purpose: events::PaymentPurpose::SpontaneousPayment(preimage),
+											purpose,
										});
									},
									hash_map::Entry::Occupied(_) => {
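With the purpose now stored alongside the HTLCs, `Event::PaymentReceived` reports it the same way in both code paths above. A handler fragment showing how a consumer distinguishes the two variants (assumed context; surrounding match and field handling elided):

    Event::PaymentReceived { payment_hash, amt, purpose } => match purpose {
        PaymentPurpose::InvoicePayment { payment_preimage, payment_secret } => {
            // A payment against an invoice we issued; payment_preimage is present when
            // LDK generated it (create_inbound_payment) rather than the user.
        },
        PaymentPurpose::SpontaneousPayment(preimage) => {
            // Keysend: the preimage rode along in the onion itself.
        },
    }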
@@ -3465,7 +3463,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
				true
			});

-			channel_state.claimable_htlcs.retain(|payment_hash, htlcs| {
+			channel_state.claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
				if htlcs.is_empty() {
					// This should be unreachable
					debug_assert!(false);
@@ -3509,7 +3507,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
		let mut channel_state = Some(self.channel_state.lock().unwrap());
		let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash);
-		if let Some(mut sources) = removed_source {
+		if let Some((_, mut sources)) = removed_source {
			for htlc in sources.drain(..) {
				if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
				let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
@@ -3551,12 +3549,13 @@
	fn get_htlc_temp_fail_err_and_data(&self, desired_err_code: u16, scid: u64, chan: &Channel<Signer>) -> (u16, Vec<u8>) {
		debug_assert_eq!(desired_err_code & 0x1000, 0x1000);
		if let Ok(upd) = self.get_channel_update_for_onion(scid, chan) {
-			let mut enc = VecWriter(Vec::with_capacity(upd.serialized_length() + 4));
+			let mut enc = VecWriter(Vec::with_capacity(upd.serialized_length() + 6));
			if desired_err_code == 0x1000 | 20 {
				// TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
				0u16.write(&mut enc).expect("Writes cannot fail");
			}
-			(upd.serialized_length() as u16).write(&mut enc).expect("Writes cannot fail");
+			(upd.serialized_length() as u16 + 2).write(&mut enc).expect("Writes cannot fail");
+			msgs::ChannelUpdate::TYPE.write(&mut enc).expect("Writes cannot fail");
			upd.write(&mut enc).expect("Writes cannot fail");
			(desired_err_code, enc.0)
		} else {
@@ -3789,26 +3788,29 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
	/// Provides a payment preimage in response to [`Event::PaymentReceived`], generating any
	/// [`MessageSendEvent`]s needed to claim the payment.
	///
+	/// Note that calling this method does *not* guarantee that the payment has been claimed. You
+	/// *must* wait for an [`Event::PaymentClaimed`] event which upon a successful claim will be
+	/// provided to your [`EventHandler`] when [`process_pending_events`] is next called.
+	///
	/// Note that if you did not set an `amount_msat` when calling [`create_inbound_payment`] or
	/// [`create_inbound_payment_for_hash`] you must check that the amount in the `PaymentReceived`
	/// event matches your expectation. If you fail to do so and call this method, you may provide
	/// the sender "proof-of-payment" when they did not fulfill the full expected payment.
	///
-	/// Returns whether any HTLCs were claimed, and thus if any new [`MessageSendEvent`]s are now
-	/// pending for processing via [`get_and_clear_pending_msg_events`].
-	///
	/// [`Event::PaymentReceived`]: crate::util::events::Event::PaymentReceived
+	/// [`Event::PaymentClaimed`]: crate::util::events::Event::PaymentClaimed
+	/// [`process_pending_events`]: EventsProvider::process_pending_events
	/// [`create_inbound_payment`]: Self::create_inbound_payment
	/// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
	/// [`get_and_clear_pending_msg_events`]: MessageSendEventsProvider::get_and_clear_pending_msg_events
-	pub fn claim_funds(&self, payment_preimage: PaymentPreimage) -> bool {
+	pub fn claim_funds(&self, payment_preimage: PaymentPreimage) {
		let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());

		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);

		let mut channel_state = Some(self.channel_state.lock().unwrap());
		let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&payment_hash);
-		if let Some(mut sources) = removed_source {
+		if let Some((payment_purpose, mut sources)) = removed_source {
			assert!(!sources.is_empty());

			// If we are claiming an MPP payment, we have to take special care to ensure that each
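The upshot of the doc change above: `claim_funds` no longer returns whether anything was claimed; success is reported asynchronously. A calling sketch under assumed setup:

    // Kick off the claim; the MessageSendEvents are generated under the hood.
    channel_manager.claim_funds(payment_preimage);

    // Later, in the event handler passed to process_pending_events:
    // Event::PaymentClaimed { payment_hash, purpose, amt } => {
    //     // Only now is it safe to treat the payment as received.
    // }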
@@ -3822,12 +3824,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
			// we got all the HTLCs and then a channel closed while we were waiting for the user to
			// provide the preimage, so worrying too much about the optimal handling isn't worth
			// it.
+			let mut claimable_amt_msat = 0;
			let mut valid_mpp = true;
			for htlc in sources.iter() {
				if let None = channel_state.as_ref().unwrap().short_to_id.get(&htlc.prev_hop.short_channel_id) {
					valid_mpp = false;
					break;
				}
+				claimable_amt_msat += htlc.value;
			}

			let mut errs = Vec::new();
@@ -3863,6 +3867,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
				}
			}

+			if claimed_any_htlcs {
+				self.pending_events.lock().unwrap().push(events::Event::PaymentClaimed {
+					payment_hash,
+					purpose: payment_purpose,
+					amt: claimable_amt_msat,
+				});
+			}
+
			// Now that we've done the entire above loop in one lock, we can handle any errors
			// which were generated.
			channel_state.take();
@@ -3871,9 +3883,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
				let res: Result<(), _> = Err(err);
				let _ = handle_error!(self, res, counterparty_node_id);
			}
-
-			claimed_any_htlcs
-		} else { false }
+		}
	}

	fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop {
@@ -3963,7 +3973,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
		}
	}

-	fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool) {
+	fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32]) {
		match source {
			HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
				mem::drop(channel_state_lock);
@@ -4054,12 +4064,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
					} else { None };

					let mut pending_events = self.pending_events.lock().unwrap();
+					let prev_channel_id = Some(prev_outpoint.to_channel_id());
+					let next_channel_id = Some(next_channel_id);

-					let source_channel_id = Some(prev_outpoint.to_channel_id());
					pending_events.push(events::Event::PaymentForwarded {
-						source_channel_id,
						fee_earned_msat,
						claim_from_onchain_tx: from_onchain,
+						prev_channel_id,
+						next_channel_id,
					});
				}
			}
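`Event::PaymentForwarded` now names both sides of the forward instead of the single `source_channel_id`, which makes per-channel fee accounting direct. A handler fragment (assumed context; `record_forward` is a hypothetical bookkeeping helper):

    Event::PaymentForwarded { prev_channel_id, next_channel_id, fee_earned_msat, claim_from_onchain_tx } => {
        if let (Some(prev), Some(next), Some(fee_msat)) = (prev_channel_id, next_channel_id, fee_earned_msat) {
            record_forward(prev, next, fee_msat, claim_from_onchain_tx);
        }
    }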
@@ -4117,7 +4129,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
	/// Called to accept a request to open a channel after [`Event::OpenChannelRequest`] has been
	/// triggered.
	///
-	/// The `temporary_channel_id` parameter indicates which inbound channel should be accepted.
+	/// The `temporary_channel_id` parameter indicates which inbound channel should be accepted,
+	/// and the `counterparty_node_id` parameter is the id of the peer which has requested to open
+	/// the channel.
	///
	/// For inbound channels, the `user_channel_id` parameter will be provided back in
	/// [`Event::ChannelClosed::user_channel_id`] to allow tracking of which events correspond
@@ -4125,7 +4139,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
	///
	/// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest
	/// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id
-	pub fn accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], user_channel_id: u64) -> Result<(), APIError> {
+	pub fn accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, user_channel_id: u64) -> Result<(), APIError> {
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);

		let mut channel_state_lock = self.channel_state.lock().unwrap();
@@ -4135,6 +4149,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
				if !channel.get().inbound_is_awaiting_accept() {
					return Err(APIError::APIMisuseError { err: "The channel isn't currently awaiting to be accepted.".to_owned() });
				}
+				if *counterparty_node_id != channel.get().get_counterparty_node_id() {
+					return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() });
+				}
				channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
					node_id: channel.get().get_counterparty_node_id(),
					msg: channel.get_mut().accept_inbound_channel(user_channel_id),
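A sketch of the matching caller-side change, for nodes running with manual channel acceptance (fragment of an `Event::OpenChannelRequest` handler; setup and the surrounding match are assumed):

    Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } => {
        let user_channel_id = 42; // echoed back in Event::ChannelClosed::user_channel_id
        channel_manager
            .accept_inbound_channel(&temporary_channel_id, &counterparty_node_id, user_channel_id)
            .expect("unknown channel, mismatched counterparty, or not awaiting accept");
    }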
@@ -4217,6 +4234,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
		let mut pending_events = self.pending_events.lock().unwrap();
		pending_events.push(events::Event::FundingGenerationReady {
			temporary_channel_id: msg.temporary_channel_id,
+			counterparty_node_id: *counterparty_node_id,
			channel_value_satoshis: value,
			output_script,
			user_channel_id: user_id,
@@ -4512,7 +4530,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
			}
		};
-		self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false);
+		self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, msg.channel_id);
		Ok(())
	}

@@ -4832,48 +4850,50 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
		let mut failed_channels = Vec::new();
		let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
		let has_pending_monitor_events = !pending_monitor_events.is_empty();
-		for monitor_event in pending_monitor_events.drain(..) {
-			match monitor_event {
-				MonitorEvent::HTLCEvent(htlc_update) => {
-					if let Some(preimage) = htlc_update.payment_preimage {
-						log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
-						self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.onchain_value_satoshis.map(|v| v * 1000), true);
-					} else {
-						log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
-						self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
-					}
-				},
-				MonitorEvent::CommitmentTxConfirmed(funding_outpoint) |
-				MonitorEvent::UpdateFailed(funding_outpoint) => {
-					let mut channel_lock = self.channel_state.lock().unwrap();
-					let channel_state = &mut *channel_lock;
-					let by_id = &mut channel_state.by_id;
-					let pending_msg_events = &mut channel_state.pending_msg_events;
-					if let hash_map::Entry::Occupied(chan_entry) = by_id.entry(funding_outpoint.to_channel_id()) {
-						let mut chan = remove_channel!(self, channel_state, chan_entry);
-						failed_channels.push(chan.force_shutdown(false));
-						if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-							pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-								msg: update
+		for (funding_outpoint, mut monitor_events) in pending_monitor_events.drain(..) {
+			for monitor_event in monitor_events.drain(..) {
+				match monitor_event {
+					MonitorEvent::HTLCEvent(htlc_update) => {
+						if let Some(preimage) = htlc_update.payment_preimage {
+							log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
+							self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.onchain_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id());
+						} else {
+							log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
+							self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+						}
+					},
+					MonitorEvent::CommitmentTxConfirmed(funding_outpoint) |
+					MonitorEvent::UpdateFailed(funding_outpoint) => {
+						let mut channel_lock = self.channel_state.lock().unwrap();
+						let channel_state = &mut *channel_lock;
+						let by_id = &mut channel_state.by_id;
+						let pending_msg_events = &mut channel_state.pending_msg_events;
+						if let hash_map::Entry::Occupied(chan_entry) = by_id.entry(funding_outpoint.to_channel_id()) {
+							let mut chan = remove_channel!(self, channel_state, chan_entry);
+							failed_channels.push(chan.force_shutdown(false));
+							if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+								pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+									msg: update
+								});
+							}
+							let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event {
+								ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() }
+							} else {
+								ClosureReason::CommitmentTxConfirmed
+							};
+							self.issue_channel_close_events(&chan, reason);
+							pending_msg_events.push(events::MessageSendEvent::HandleError {
+								node_id: chan.get_counterparty_node_id(),
+								action: msgs::ErrorAction::SendErrorMessage {
+									msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
+								},
							});
						}
-						let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event {
-							ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() }
-						} else {
-							ClosureReason::CommitmentTxConfirmed
-						};
-						self.issue_channel_close_events(&chan, reason);
-						pending_msg_events.push(events::MessageSendEvent::HandleError {
-							node_id: chan.get_counterparty_node_id(),
-							action: msgs::ErrorAction::SendErrorMessage {
-								msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
-							},
-						});
-					}
-				},
-				MonitorEvent::UpdateCompleted { funding_txo, monitor_update_id } => {
-					self.channel_monitor_updated(&funding_txo, monitor_update_id);
-				},
+					},
+					MonitorEvent::UpdateCompleted { funding_txo, monitor_update_id } => {
+						self.channel_monitor_updated(&funding_txo, monitor_update_id);
+					},
+				}
			}
		}
@@ -5535,7 +5555,7 @@ where
				});

				if let Some(height) = height_opt {
-					channel_state.claimable_htlcs.retain(|payment_hash, htlcs| {
+					channel_state.claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
						htlcs.retain(|htlc| {
							// If height is approaching the number of blocks we think it takes us to get
							// our commitment transaction confirmed before the HTLC expires, plus the
@@ -5703,39 +5723,21 @@ impl<Signer: Sign, M: Deref + Sync + Send, T: Deref + Sync + Send, K: Deref + S
		let channel_state = &mut *channel_state_lock;
		let pending_msg_events = &mut channel_state.pending_msg_events;
		let short_to_id = &mut channel_state.short_to_id;
-		if no_connection_possible {
-			log_debug!(self.logger, "Failing all channels with {} due to no_connection_possible", log_pubkey!(counterparty_node_id));
-			channel_state.by_id.retain(|_, chan| {
-				if chan.get_counterparty_node_id() == *counterparty_node_id {
+		log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. We believe we {} make future connections to this peer.",
+			log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" });
+		channel_state.by_id.retain(|_, chan| {
+			if chan.get_counterparty_node_id() == *counterparty_node_id {
+				chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
+				if chan.is_shutdown() {
					update_maps_on_chan_removal!(self, short_to_id, chan);
-					failed_channels.push(chan.force_shutdown(true));
-					if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-						pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-							msg: update
-						});
-					}
					self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
-					false
+					return false;
				} else {
-					true
+					no_channels_remain = false;
				}
-			});
-		} else {
-			log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates", log_pubkey!(counterparty_node_id));
-			channel_state.by_id.retain(|_, chan| {
-				if chan.get_counterparty_node_id() == *counterparty_node_id {
-					chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
-					if chan.is_shutdown() {
-						update_maps_on_chan_removal!(self, short_to_id, chan);
-						self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
-						return false;
-					} else {
-						no_channels_remain = false;
-					}
-				}
-				true
-			})
-		}
+			}
+			true
+		});
		pending_msg_events.retain(|msg| {
			match msg {
				&events::MessageSendEvent::SendAcceptChannel { ref node_id, .. } => node_id != counterparty_node_id,
@@ -5819,7 +5821,7 @@ impl<Signer: Sign, M: Deref + Sync + Send, T: Deref + Sync + Send, K: Deref + S
			for chan in self.list_channels() {
				if chan.counterparty.node_id == *counterparty_node_id {
					// Untrusted messages from peer, we throw away the error if id points to a non-existent channel
-					let _ = self.force_close_channel_with_peer(&chan.channel_id, Some(counterparty_node_id), Some(&msg.data));
+					let _ = self.force_close_channel_with_peer(&chan.channel_id, counterparty_node_id, Some(&msg.data));
				}
			}
		} else {
@@ -5841,7 +5843,7 @@ impl<Signer: Sign, M: Deref + Sync + Send, T: Deref + Sync + Send, K: Deref + S
			}

			// Untrusted messages from peer, we throw away the error if id points to a non-existent channel
-			let _ = self.force_close_channel_with_peer(&msg.channel_id, Some(counterparty_node_id), Some(&msg.data));
+			let _ = self.force_close_channel_with_peer(&msg.channel_id, counterparty_node_id, Some(&msg.data));
		}
	}
@@ -6074,13 +6076,9 @@ impl_writeable_tlv_based!(HTLCPreviousHopData, {

impl Writeable for ClaimableHTLC {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
-		let payment_data = match &self.onion_payload {
-			OnionPayload::Invoice { _legacy_hop_data } => Some(_legacy_hop_data),
-			_ => None,
-		};
-		let keysend_preimage = match self.onion_payload {
-			OnionPayload::Invoice { .. } => None,
-			OnionPayload::Spontaneous(preimage) => Some(preimage.clone()),
+		let (payment_data, keysend_preimage) = match &self.onion_payload {
+			OnionPayload::Invoice { _legacy_hop_data } => (_legacy_hop_data.as_ref(), None),
+			OnionPayload::Spontaneous(preimage) => (None, Some(preimage)),
		};
		write_tlv_fields!(writer, {
			(0, self.prev_hop, required),
@@ -6121,13 +6119,13 @@ impl Readable for ClaimableHTLC {
					OnionPayload::Spontaneous(p)
				},
				None => {
-					if payment_data.is_none() {
-						return Err(DecodeError::InvalidValue)
-					}
					if total_msat.is_none() {
+						if payment_data.is_none() {
+							return Err(DecodeError::InvalidValue)
+						}
						total_msat = Some(payment_data.as_ref().unwrap().total_msat);
					}
-					OnionPayload::Invoice { _legacy_hop_data: payment_data.unwrap() }
+					OnionPayload::Invoice { _legacy_hop_data: payment_data }
				},
			};
			Ok(Self {
@@ -6300,13 +6298,15 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable f
			}
		}

+		let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new();
		(channel_state.claimable_htlcs.len() as u64).write(writer)?;
-		for (payment_hash, previous_hops) in channel_state.claimable_htlcs.iter() {
+		for (payment_hash, (purpose, previous_hops)) in channel_state.claimable_htlcs.iter() {
			payment_hash.write(writer)?;
			(previous_hops.len() as u64).write(writer)?;
			for htlc in previous_hops.iter() {
				htlc.write(writer)?;
			}
+			htlc_purposes.push(purpose);
		}

		let per_peer_state = self.per_peer_state.write().unwrap();
@@ -6383,6 +6383,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable f
			(3, pending_outbound_payments, required),
			(5, self.our_network_pubkey, required),
			(7, self.fake_scid_rand_bytes, required),
+			(9, htlc_purposes, vec_type),
		});

		Ok(())
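On the write side above, the purposes are collected in the iteration order of `claimable_htlcs` and serialized as TLV field 9 (odd, so older readers skip it); on read, in the next hunks, the nth purpose is re-paired with the nth `(payment_hash, htlcs)` entry and a length mismatch is treated as corrupt data. A standalone sketch of that re-pairing invariant:

    use std::collections::HashMap;

    fn main() {
        let list = vec![([1u8; 32], vec!["htlc-a"]), ([2u8; 32], vec!["htlc-b"])];
        let purposes = vec!["purpose-1", "purpose-2"]; // written in the same order as `list`
        assert_eq!(purposes.len(), list.len()); // on mismatch LDK returns DecodeError::InvalidValue
        let claimable: HashMap<_, _> = purposes.into_iter()
            .zip(list.into_iter())
            .map(|(purpose, (hash, htlcs))| (hash, (purpose, htlcs)))
            .collect();
        assert_eq!(claimable.len(), 2);
    }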
@@ -6601,15 +6602,15 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
		}

		let claimable_htlcs_count: u64 = Readable::read(reader)?;
-		let mut claimable_htlcs = HashMap::with_capacity(cmp::min(claimable_htlcs_count as usize, 128));
+		let mut claimable_htlcs_list = Vec::with_capacity(cmp::min(claimable_htlcs_count as usize, 128));
		for _ in 0..claimable_htlcs_count {
			let payment_hash = Readable::read(reader)?;
			let previous_hops_len: u64 = Readable::read(reader)?;
			let mut previous_hops = Vec::with_capacity(cmp::min(previous_hops_len as usize, MAX_ALLOC_SIZE/mem::size_of::<ClaimableHTLC>()));
			for _ in 0..previous_hops_len {
-				previous_hops.push(Readable::read(reader)?);
+				previous_hops.push(<ClaimableHTLC as Readable>::read(reader)?);
			}
-			claimable_htlcs.insert(payment_hash, previous_hops);
+			claimable_htlcs_list.push((payment_hash, previous_hops));
		}

		let peer_count: u64 = Readable::read(reader)?;
@@ -6679,11 +6680,13 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
		let mut pending_outbound_payments = None;
		let mut received_network_pubkey: Option<PublicKey> = None;
		let mut fake_scid_rand_bytes: Option<[u8; 32]> = None;
+		let mut claimable_htlc_purposes = None;
		read_tlv_fields!(reader, {
			(1, pending_outbound_payments_no_retry, option),
			(3, pending_outbound_payments, option),
			(5, received_network_pubkey, option),
			(7, fake_scid_rand_bytes, option),
+			(9, claimable_htlc_purposes, vec_type),
		});
		if fake_scid_rand_bytes.is_none() {
			fake_scid_rand_bytes = Some(args.keys_manager.get_secure_random_bytes());
@@ -6706,7 +6709,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
		// payments which are still in-flight via their on-chain state.
		// We only rebuild the pending payments map if we were most recently serialized by
		// 0.0.102+
-		for (_, monitor) in args.channel_monitors {
+		for (_, monitor) in args.channel_monitors.iter() {
			if by_id.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() {
				for (htlc_source, htlc) in monitor.get_pending_outbound_htlcs() {
					if let HTLCSource::OutboundRoute { payment_id, session_priv, path, payment_secret, .. } = htlc_source {
@@ -6744,6 +6747,49 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
			}
		}

+		let inbound_pmt_key_material = args.keys_manager.get_inbound_payment_key_material();
+		let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
+
+		let mut claimable_htlcs = HashMap::with_capacity(claimable_htlcs_list.len());
+		if let Some(mut purposes) = claimable_htlc_purposes {
+			if purposes.len() != claimable_htlcs_list.len() {
+				return Err(DecodeError::InvalidValue);
+			}
+			for (purpose, (payment_hash, previous_hops)) in purposes.drain(..).zip(claimable_htlcs_list.drain(..)) {
+				claimable_htlcs.insert(payment_hash, (purpose, previous_hops));
+			}
+		} else {
+			// LDK versions prior to 0.0.107 did not write a `pending_htlc_purposes`, but do
+			// include a `_legacy_hop_data` in the `OnionPayload`.
+			for (payment_hash, previous_hops) in claimable_htlcs_list.drain(..) {
+				if previous_hops.is_empty() {
+					return Err(DecodeError::InvalidValue);
+				}
+				let purpose = match &previous_hops[0].onion_payload {
+					OnionPayload::Invoice { _legacy_hop_data } => {
+						if let Some(hop_data) = _legacy_hop_data {
+							events::PaymentPurpose::InvoicePayment {
+								payment_preimage: match pending_inbound_payments.get(&payment_hash) {
+									Some(inbound_payment) => inbound_payment.payment_preimage,
+									None => match inbound_payment::verify(payment_hash, &hop_data, 0, &expanded_inbound_key, &args.logger) {
+										Ok(payment_preimage) => payment_preimage,
+										Err(()) => {
+											log_error!(args.logger, "Failed to read claimable payment data for HTLC with payment hash {} - was not a pending inbound payment and didn't match our payment key", log_bytes!(payment_hash.0));
+											return Err(DecodeError::InvalidValue);
+										}
+									}
+								},
+								payment_secret: hop_data.payment_secret,
+							}
+						} else { return Err(DecodeError::InvalidValue); }
+					},
+					OnionPayload::Spontaneous(payment_preimage) =>
+						events::PaymentPurpose::SpontaneousPayment(*payment_preimage),
+				};
+				claimable_htlcs.insert(payment_hash, (purpose, previous_hops));
+			}
+		}
+
		let mut secp_ctx = Secp256k1::new();
		secp_ctx.seeded_randomize(&args.keys_manager.get_secure_random_bytes());

@@ -6789,8 +6835,38 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
			}
		}

-		let inbound_pmt_key_material = args.keys_manager.get_inbound_payment_key_material();
-		let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
+		for (_, monitor) in args.channel_monitors.iter() {
+			for (payment_hash, payment_preimage) in monitor.get_stored_preimages() {
+				if let Some(claimable_htlcs) = claimable_htlcs.remove(&payment_hash) {
+					log_info!(args.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", log_bytes!(payment_hash.0));
+					for claimable_htlc in claimable_htlcs.1 {
+						// Add a holding-cell claim of the payment to the Channel, which should be
+						// applied ~immediately on peer reconnection. Because it won't generate a
+						// new commitment transaction we can just provide the payment preimage to
+						// the corresponding ChannelMonitor and nothing else.
+						//
+						// We do so directly instead of via the normal ChannelMonitor update
+						// procedure as the ChainMonitor hasn't yet been initialized, implying
+						// we're not allowed to call it directly yet. Further, we do the update
+						// without incrementing the ChannelMonitor update ID as there isn't any
+						// reason to.
+						// If we were to generate a new ChannelMonitor update ID here and then
+						// crash before the user finishes block connect we'd end up force-closing
+						// this channel as well. On the flip side, there's no harm in restarting
+						// without the new monitor persisted - we'll end up right back here on
+						// restart.
+						let previous_channel_id = claimable_htlc.prev_hop.outpoint.to_channel_id();
+						if let Some(channel) = by_id.get_mut(&previous_channel_id) {
+							channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &args.logger);
+						}
+						if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.outpoint) {
+							previous_hop_monitor.provide_payment_preimage(&payment_hash, &payment_preimage, &args.tx_broadcaster, &args.fee_estimator, &args.logger);
+						}
+					}
+				}
+			}
+		}
+
		let channel_manager = ChannelManager {
			genesis_hash,
			fee_estimator: args.fee_estimator,
@@ -7044,8 +7120,10 @@ mod tests {
		// claim_funds_along_route because the ordering of the messages causes the second half of the
		// payment to be put in the holding cell, which confuses the test utilities. So we exchange the
		// lightning messages manually.
-		assert!(nodes[1].node.claim_funds(payment_preimage));
+		nodes[1].node.claim_funds(payment_preimage);
+		expect_payment_claimed!(nodes[1], our_payment_hash, 200_000);
		check_added_monitors!(nodes[1], 2);
+
		let bs_first_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
		nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_first_updates.update_fulfill_htlcs[0]);
		nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_updates.commitment_signed);
@@ -7429,7 +7507,7 @@ pub mod bench {
			tx = Transaction { version: 2, lock_time: 0, input: Vec::new(), output: vec![TxOut {
				value: 8_000_000, script_pubkey: output_script,
			}]};
-			node_a.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
+			node_a.funding_transaction_generated(&temporary_channel_id, &node_b.get_our_node_id(), tx.clone()).unwrap();
		} else { panic!(); }

		node_b.handle_funding_created(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingCreated, node_b.get_our_node_id()));
@@ -7491,7 +7569,8 @@ pub mod bench {
				expect_pending_htlcs_forwardable!(NodeHolder { node: &$node_b });
				expect_payment_received!(NodeHolder { node: &$node_b }, payment_hash, payment_secret, 10_000);

-				assert!($node_b.claim_funds(payment_preimage));
+				$node_b.claim_funds(payment_preimage);
+				expect_payment_claimed!(NodeHolder { node: &$node_b }, payment_hash, 10_000);

				match $node_b.get_and_clear_pending_msg_events().pop().unwrap() {
					MessageSendEvent::UpdateHTLCs { node_id, updates } => {