X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=5e43158b1c9c63d4da2400858f4060b55474efa4;hb=1955008d6d1322733039cea486691fe00af094e7;hp=583ebfb9b73b82352c55a8f775c7d7c3920a30eb;hpb=c986e52ce83e9aeaa9447abebc5f6600470337cf;p=rust-lightning diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 583ebfb9..5e43158b 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -43,7 +43,6 @@ use chain::transaction::{OutPoint, TransactionData}; // Since this struct is returned in `list_channels` methods, expose it here in case users want to // construct one themselves. use ln::{PaymentHash, PaymentPreimage, PaymentSecret}; -pub use ln::channel::CounterpartyForwardingInfo; use ln::channel::{Channel, ChannelError, ChannelUpdateStatus, UpdateFulfillCommitFetch}; use ln::features::{InitFeatures, NodeFeatures}; use routing::router::{Route, RouteHop}; @@ -53,9 +52,9 @@ use ln::onion_utils; use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, OptionalField}; use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner}; use util::config::UserConfig; -use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider}; +use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason}; use util::{byte_utils, events}; -use util::ser::{Readable, ReadableArgs, MaybeReadable, Writeable, Writer}; +use util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer}; use util::chacha20::{ChaCha20, ChaChaReader}; use util::logger::{Logger, Level}; use util::errors::APIError; @@ -243,6 +242,7 @@ type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource struct MsgHandleErrInternal { err: msgs::LightningError, + chan_id: Option<[u8; 32]>, // If Some a channel of ours has been closed shutdown_finish: Option<(ShutdownResult, Option)>, } impl MsgHandleErrInternal { @@ -258,6 +258,7 @@ impl MsgHandleErrInternal { }, }, }, + chan_id: None, shutdown_finish: None, } } @@ -268,12 +269,13 @@ impl MsgHandleErrInternal { err, action: msgs::ErrorAction::IgnoreError, }, + chan_id: None, shutdown_finish: None, } } #[inline] fn from_no_close(err: msgs::LightningError) -> Self { - Self { err, shutdown_finish: None } + Self { err, chan_id: None, shutdown_finish: None } } #[inline] fn from_finish_shutdown(err: String, channel_id: [u8; 32], shutdown_res: ShutdownResult, channel_update: Option) -> Self { @@ -287,6 +289,7 @@ impl MsgHandleErrInternal { }, }, }, + chan_id: Some(channel_id), shutdown_finish: Some((shutdown_res, channel_update)), } } @@ -321,6 +324,7 @@ impl MsgHandleErrInternal { }, }, }, + chan_id: None, shutdown_finish: None, } } @@ -485,14 +489,17 @@ pub struct ChannelManager>, + pending_outbound_payments: Mutex>>, our_network_key: SecretKey, our_network_pubkey: PublicKey, @@ -644,6 +651,19 @@ const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRA #[allow(dead_code)] const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER; +/// Information needed for constructing an invoice route hint for this channel. +#[derive(Clone, Debug, PartialEq)] +pub struct CounterpartyForwardingInfo { + /// Base routing fee in millisatoshis. + pub fee_base_msat: u32, + /// Amount in millionths of a satoshi the channel will charge per transferred satoshi. 
+ pub fee_proportional_millionths: u32, + /// The minimum difference in cltv_expiry between an ingoing HTLC and its outgoing counterpart, + /// such that the outgoing HTLC is forwardable to this counterparty. See `msgs::ChannelUpdate`'s + /// `cltv_expiry_delta` for more details. + pub cltv_expiry_delta: u16, +} + /// Channel parameters which apply to our counterparty. These are split out from [`ChannelDetails`] /// to better separate parameters. #[derive(Clone, Debug, PartialEq)] @@ -798,12 +818,13 @@ macro_rules! handle_error { ($self: ident, $internal: expr, $counterparty_node_id: expr) => { match $internal { Ok(msg) => Ok(msg), - Err(MsgHandleErrInternal { err, shutdown_finish }) => { + Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => { #[cfg(debug_assertions)] { // In testing, ensure there are no deadlocks where the lock is already held upon // entering the macro. assert!($self.channel_state.try_lock().is_ok()); + assert!($self.pending_events.try_lock().is_ok()); } let mut msg_events = Vec::with_capacity(2); @@ -815,6 +836,9 @@ macro_rules! handle_error { msg: update }); } + if let Some(channel_id) = chan_id { + $self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id, reason: ClosureReason::ProcessingError { err: err.err.clone() } }); + } } log_error!($self.logger, "{}", err.err); @@ -1156,7 +1180,7 @@ impl ChannelMana pending_msg_events: Vec::new(), }), pending_inbound_payments: Mutex::new(HashMap::new()), - pending_outbound_payments: Mutex::new(HashSet::new()), + pending_outbound_payments: Mutex::new(HashMap::new()), our_network_key: keys_manager.get_node_secret(), our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret()), @@ -1302,6 +1326,18 @@ impl ChannelMana self.list_channels_with_filter(|&(_, ref channel)| channel.is_live()) } + /// Helper function that issues the channel close events + fn issue_channel_close_events(&self, channel: &Channel, closure_reason: ClosureReason) { + let mut pending_events_lock = self.pending_events.lock().unwrap(); + match channel.unbroadcasted_funding() { + Some(transaction) => { + pending_events_lock.push(events::Event::DiscardFunding { channel_id: channel.channel_id(), transaction }) + }, + None => {}, + } + pending_events_lock.push(events::Event::ChannelClosed { channel_id: channel.channel_id(), reason: closure_reason }); + } + fn close_channel_internal(&self, channel_id: &[u8; 32], target_feerate_sats_per_1000_weight: Option) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); @@ -1348,6 +1384,7 @@ impl ChannelMana msg: channel_update }); } + self.issue_channel_close_events(&channel, ClosureReason::HolderForceClosed); } break Ok(()); }, @@ -1423,7 +1460,9 @@ impl ChannelMana } } - fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>) -> Result { + /// `peer_node_id` should be set when we receive a message from a peer, but not set when the + /// user closes, which will be re-exposed as the `ChannelClosed` reason. 
+ fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>, peer_msg: Option<&String>) -> Result { let mut chan = { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; @@ -1436,6 +1475,13 @@ impl ChannelMana if let Some(short_id) = chan.get().get_short_channel_id() { channel_state.short_to_id.remove(&short_id); } + if peer_node_id.is_some() { + if let Some(peer_msg) = peer_msg { + self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() }); + } + } else { + self.issue_channel_close_events(chan.get(),ClosureReason::HolderForceClosed); + } chan.remove_entry().1 } else { return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()}); @@ -1457,7 +1503,7 @@ impl ChannelMana /// the chain and rejecting new HTLCs on the given channel. Fails if channel_id is unknown to the manager. pub fn force_close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - match self.force_close_channel_with_peer(channel_id, None) { + match self.force_close_channel_with_peer(channel_id, None, None) { Ok(counterparty_node_id) => { self.channel_state.lock().unwrap().pending_msg_events.push( events::MessageSendEvent::HandleError { @@ -1853,7 +1899,9 @@ impl ChannelMana let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash); let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - assert!(self.pending_outbound_payments.lock().unwrap().insert(session_priv_bytes)); + let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap(); + let sessions = pending_outbounds.entry(mpp_id).or_insert(HashSet::new()); + assert!(sessions.insert(session_priv_bytes)); let err: Result<(), _> = loop { let mut channel_lock = self.channel_state.lock().unwrap(); @@ -2399,6 +2447,7 @@ impl ChannelMana if let Some(short_id) = channel.get_short_channel_id() { channel_state.short_to_id.remove(&short_id); } + // ChannelClosed event is generated by handle_error for us. Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok())) }, ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); } @@ -2832,23 +2881,29 @@ impl ChannelMana self.fail_htlc_backwards_internal(channel_state, htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data}); }, - HTLCSource::OutboundRoute { session_priv, .. } => { - if { - let mut session_priv_bytes = [0; 32]; - session_priv_bytes.copy_from_slice(&session_priv[..]); - self.pending_outbound_payments.lock().unwrap().remove(&session_priv_bytes) - } { - self.pending_events.lock().unwrap().push( - events::Event::PaymentFailed { - payment_hash, - rejected_by_dest: false, - network_update: None, -#[cfg(test)] - error_code: None, -#[cfg(test)] - error_data: None, + HTLCSource::OutboundRoute { session_priv, mpp_id, path, .. 
} => { + let mut session_priv_bytes = [0; 32]; + session_priv_bytes.copy_from_slice(&session_priv[..]); + let mut outbounds = self.pending_outbound_payments.lock().unwrap(); + if let hash_map::Entry::Occupied(mut sessions) = outbounds.entry(mpp_id) { + if sessions.get_mut().remove(&session_priv_bytes) { + self.pending_events.lock().unwrap().push( + events::Event::PaymentPathFailed { + payment_hash, + rejected_by_dest: false, + network_update: None, + all_paths_failed: sessions.get().len() == 0, + path: path.clone(), + #[cfg(test)] + error_code: None, + #[cfg(test)] + error_data: None, + } + ); + if sessions.get().len() == 0 { + sessions.remove(); } - ) + } } else { log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0)); } @@ -2873,12 +2928,21 @@ impl ChannelMana // from block_connected which may run during initialization prior to the chain_monitor // being fully configured. See the docs for `ChannelManagerReadArgs` for more. match source { - HTLCSource::OutboundRoute { ref path, session_priv, .. } => { - if { - let mut session_priv_bytes = [0; 32]; - session_priv_bytes.copy_from_slice(&session_priv[..]); - !self.pending_outbound_payments.lock().unwrap().remove(&session_priv_bytes) - } { + HTLCSource::OutboundRoute { ref path, session_priv, mpp_id, .. } => { + let mut session_priv_bytes = [0; 32]; + session_priv_bytes.copy_from_slice(&session_priv[..]); + let mut outbounds = self.pending_outbound_payments.lock().unwrap(); + let mut all_paths_failed = false; + if let hash_map::Entry::Occupied(mut sessions) = outbounds.entry(mpp_id) { + if !sessions.get_mut().remove(&session_priv_bytes) { + log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0)); + return; + } + if sessions.get().len() == 0 { + all_paths_failed = true; + sessions.remove(); + } + } else { log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0)); return; } @@ -2894,10 +2958,12 @@ impl ChannelMana // process_onion_failure we should close that channel as it implies our // next-hop is needlessly blaming us! self.pending_events.lock().unwrap().push( - events::Event::PaymentFailed { + events::Event::PaymentPathFailed { payment_hash: payment_hash.clone(), rejected_by_dest: !payment_retryable, network_update, + all_paths_failed, + path: path.clone(), #[cfg(test)] error_code: onion_error_code, #[cfg(test)] @@ -2919,10 +2985,12 @@ impl ChannelMana // TODO: For non-temporary failures, we really should be closing the // channel here as we apparently can't relay through them anyway. self.pending_events.lock().unwrap().push( - events::Event::PaymentFailed { + events::Event::PaymentPathFailed { payment_hash: payment_hash.clone(), rejected_by_dest: path.len() == 1, network_update: None, + all_paths_failed, + path: path.clone(), #[cfg(test)] error_code: Some(*failure_code), #[cfg(test)] @@ -3119,17 +3187,18 @@ impl ChannelMana fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option, from_onchain: bool) { match source { - HTLCSource::OutboundRoute { session_priv, .. } => { + HTLCSource::OutboundRoute { session_priv, mpp_id, .. 
} => { mem::drop(channel_state_lock); - if { - let mut session_priv_bytes = [0; 32]; - session_priv_bytes.copy_from_slice(&session_priv[..]); - self.pending_outbound_payments.lock().unwrap().remove(&session_priv_bytes) - } { - let mut pending_events = self.pending_events.lock().unwrap(); - pending_events.push(events::Event::PaymentSent { - payment_preimage - }); + let mut session_priv_bytes = [0; 32]; + session_priv_bytes.copy_from_slice(&session_priv[..]); + let mut outbounds = self.pending_outbound_payments.lock().unwrap(); + let found_payment = if let Some(mut sessions) = outbounds.remove(&mpp_id) { + sessions.remove(&session_priv_bytes) + } else { false }; + if found_payment { + self.pending_events.lock().unwrap().push( + events::Event::PaymentSent { payment_preimage } + ); } else { log_trace!(self.logger, "Received duplicative fulfill for HTLC with payment_preimage {}", log_bytes!(payment_preimage.0)); } @@ -3511,6 +3580,7 @@ impl ChannelMana msg: update }); } + self.issue_channel_close_events(&chan, ClosureReason::CooperativeClosure); } Ok(()) } @@ -3906,7 +3976,7 @@ impl ChannelMana self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); } }, - MonitorEvent::CommitmentTxBroadcasted(funding_outpoint) => { + MonitorEvent::CommitmentTxConfirmed(funding_outpoint) => { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; let by_id = &mut channel_state.by_id; @@ -3922,6 +3992,7 @@ impl ChannelMana msg: update }); } + self.issue_channel_close_events(&chan, ClosureReason::CommitmentTxConfirmed); pending_msg_events.push(events::MessageSendEvent::HandleError { node_id: chan.get_counterparty_node_id(), action: msgs::ErrorAction::SendErrorMessage { @@ -3983,6 +4054,7 @@ impl ChannelMana Err(e) => { let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id); handle_errors.push((chan.get_counterparty_node_id(), Err(res))); + // ChannelClosed event is generated by handle_error for us !close_channel } } @@ -4036,6 +4108,8 @@ impl ChannelMana }); } + self.issue_channel_close_events(chan, ClosureReason::CooperativeClosure); + log_info!(self.logger, "Broadcasting {}", log_tx!(tx)); self.tx_broadcaster.broadcast_transaction(&tx); false @@ -4456,6 +4530,7 @@ where msg: update }); } + self.issue_channel_close_events(channel, ClosureReason::CommitmentTxConfirmed); pending_msg_events.push(events::MessageSendEvent::HandleError { node_id: channel.get_counterparty_node_id(), action: msgs::ErrorAction::SendErrorMessage { msg: e }, @@ -4646,6 +4721,7 @@ impl msg: update }); } + self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer); false } else { true @@ -4660,6 +4736,7 @@ impl if let Some(short_id) = chan.get_short_channel_id() { short_to_id.remove(&short_id); } + self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer); return false; } else { no_channels_remain = false; @@ -4750,12 +4827,12 @@ impl for chan in self.list_channels() { if chan.counterparty.node_id == *counterparty_node_id { // Untrusted messages from peer, we throw away the error if id points to a non-existent channel - let _ = self.force_close_channel_with_peer(&chan.channel_id, Some(counterparty_node_id)); + let _ = self.force_close_channel_with_peer(&chan.channel_id, Some(counterparty_node_id), Some(&msg.data)); } } } else { // Untrusted messages from peer, we throw away the error if id points to a non-existent 
channel - let _ = self.force_close_channel_with_peer(&msg.channel_id, Some(counterparty_node_id)); + let _ = self.force_close_channel_with_peer(&msg.channel_id, Some(counterparty_node_id), Some(&msg.data)); } } } @@ -4857,10 +4934,74 @@ impl_writeable_tlv_based!(PendingHTLCInfo, { (8, outgoing_cltv_value, required) }); -impl_writeable_tlv_based_enum!(HTLCFailureMsg, ; - (0, Relay), - (1, Malformed), -); + +impl Writeable for HTLCFailureMsg { + fn write(&self, writer: &mut W) -> Result<(), io::Error> { + match self { + HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { channel_id, htlc_id, reason }) => { + 0u8.write(writer)?; + channel_id.write(writer)?; + htlc_id.write(writer)?; + reason.write(writer)?; + }, + HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC { + channel_id, htlc_id, sha256_of_onion, failure_code + }) => { + 1u8.write(writer)?; + channel_id.write(writer)?; + htlc_id.write(writer)?; + sha256_of_onion.write(writer)?; + failure_code.write(writer)?; + }, + } + Ok(()) + } +} + +impl Readable for HTLCFailureMsg { + fn read(reader: &mut R) -> Result { + let id: u8 = Readable::read(reader)?; + match id { + 0 => { + Ok(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { + channel_id: Readable::read(reader)?, + htlc_id: Readable::read(reader)?, + reason: Readable::read(reader)?, + })) + }, + 1 => { + Ok(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC { + channel_id: Readable::read(reader)?, + htlc_id: Readable::read(reader)?, + sha256_of_onion: Readable::read(reader)?, + failure_code: Readable::read(reader)?, + })) + }, + // In versions prior to 0.0.101, HTLCFailureMsg objects were written with type 0 or 1 but + // weren't length-prefixed and thus didn't support reading the TLV stream suffix of the network + // messages contained in the variants. + // In version 0.0.101, support for reading the variants with these types was added, and + // we should migrate to writing these variants when UpdateFailHTLC or + // UpdateFailMalformedHTLC get TLV fields. + 2 => { + let length: BigSize = Readable::read(reader)?; + let mut s = FixedLengthReader::new(reader, length.0); + let res = Readable::read(&mut s)?; + s.eat_remaining()?; // Return ShortRead if there's actually not enough bytes + Ok(HTLCFailureMsg::Relay(res)) + }, + 3 => { + let length: BigSize = Readable::read(reader)?; + let mut s = FixedLengthReader::new(reader, length.0); + let res = Readable::read(&mut s)?; + s.eat_remaining()?; // Return ShortRead if there's actually not enough bytes + Ok(HTLCFailureMsg::Malformed(res)) + }, + _ => Err(DecodeError::UnknownRequiredFeature), + } + } +} + impl_writeable_tlv_based_enum!(PendingHTLCStatus, ; (0, Forward), (1, Fail), @@ -5105,12 +5246,21 @@ impl Writeable f } let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap(); - (pending_outbound_payments.len() as u64).write(writer)?; - for session_priv in pending_outbound_payments.iter() { - session_priv.write(writer)?; + // For backwards compat, write the session privs and their total length. 
+ let mut num_pending_outbounds_compat: u64 = 0; + for (_, outbounds) in pending_outbound_payments.iter() { + num_pending_outbounds_compat += outbounds.len() as u64; + } + num_pending_outbounds_compat.write(writer)?; + for (_, outbounds) in pending_outbound_payments.iter() { + for outbound in outbounds.iter() { + outbound.write(writer)?; + } } - write_tlv_fields!(writer, {}); + write_tlv_fields!(writer, { + (1, pending_outbound_payments, required), + }); Ok(()) } @@ -5247,6 +5397,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128)); let mut by_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); let mut short_to_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); + let mut channel_closures = Vec::new(); for _ in 0..channel_count { let mut channel: Channel = Channel::read(reader, &args.keys_manager)?; let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?; @@ -5277,6 +5428,10 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> let (_, mut new_failed_htlcs) = channel.force_shutdown(true); failed_htlcs.append(&mut new_failed_htlcs); monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger); + channel_closures.push(events::Event::ChannelClosed { + channel_id: channel.channel_id(), + reason: ClosureReason::OutdatedChannelManager + }); } else { if let Some(short_channel_id) = channel.get_short_channel_id() { short_to_id.insert(short_channel_id, channel.channel_id()); @@ -5363,19 +5518,31 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> } } - let pending_outbound_payments_count: u64 = Readable::read(reader)?; - let mut pending_outbound_payments: HashSet<[u8; 32]> = HashSet::with_capacity(cmp::min(pending_outbound_payments_count as usize, MAX_ALLOC_SIZE/32)); - for _ in 0..pending_outbound_payments_count { - if !pending_outbound_payments.insert(Readable::read(reader)?) 
{
-			return Err(DecodeError::InvalidValue);
-		}
+		let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?;
+		let mut pending_outbound_payments_compat: HashMap<MppId, HashSet<[u8; 32]>> =
+			HashMap::with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32));
+		for _ in 0..pending_outbound_payments_count_compat {
+			let session_priv = Readable::read(reader)?;
+			if pending_outbound_payments_compat.insert(MppId(session_priv), [session_priv].iter().cloned().collect()).is_some() {
+				return Err(DecodeError::InvalidValue)
+			};
 		}
-		read_tlv_fields!(reader, {});
+		let mut pending_outbound_payments = None;
+		read_tlv_fields!(reader, {
+			(1, pending_outbound_payments, option),
+		});
+		if pending_outbound_payments.is_none() {
+			pending_outbound_payments = Some(pending_outbound_payments_compat);
+		}

 		let mut secp_ctx = Secp256k1::new();
 		secp_ctx.seeded_randomize(&args.keys_manager.get_secure_random_bytes());

+		if !channel_closures.is_empty() {
+			pending_events_read.append(&mut channel_closures);
+		}
+
 		let channel_manager = ChannelManager {
 			genesis_hash,
 			fee_estimator: args.fee_estimator,
@@ -5392,7 +5559,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 				pending_msg_events: Vec::new(),
 			}),
 			pending_inbound_payments: Mutex::new(pending_inbound_payments),
-			pending_outbound_payments: Mutex::new(pending_outbound_payments),
+			pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()),

 			our_network_key: args.keys_manager.get_node_secret(),
 			our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &args.keys_manager.get_node_secret()),
@@ -5653,7 +5820,8 @@ mod tests {
 		nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
 		check_added_monitors!(nodes[0], 1);

-		// There's an existing bug that generates a PaymentSent event for each MPP path, so handle that here.
+		// Note that successful MPP payments will generate 1 event upon the first path's success. No
+		// further events will be generated for subsequent path successes.
 		let events = nodes[0].node.get_and_clear_pending_events();
 		match events[0] {
 			Event::PaymentSent { payment_preimage: ref preimage } => {
 				assert_eq!(payment_preimage, *preimage);
 			},
 			_ => panic!("Unexpected event"),
 		}
-		match events[1] {
-			Event::PaymentSent { payment_preimage: ref preimage } => {
-				assert_eq!(payment_preimage, *preimage);
-			},
-			_ => panic!("Unexpected event"),
-		}
 	}

 #[test]
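
Alongside the serialization changes, the patch adds `ClosureReason` and starts emitting `Event::ChannelClosed` (plus `Event::DiscardFunding` for a never-broadcast funding transaction) everywhere a channel is removed: cooperative closes, holder and counterparty force-closes, commitment-transaction confirmation, peer disconnects during reestablish, processing errors surfaced through `handle_error!`, and channels force-closed on deserialization of an outdated `ChannelManager`. A minimal consumer of those events might look like the sketch below. The enums are local stand-ins mirroring only the variants and fields used in this diff; the real types live in `util::events` and carry more fields and serialization logic, and `hex`/`handle_event` are illustrative names, not library API.

	// Local stand-ins mirroring the ClosureReason variants used in the diff above.
	#[allow(dead_code)]
	#[derive(Debug)]
	enum ClosureReason {
		CounterpartyForceClosed { peer_msg: String },
		HolderForceClosed,
		CooperativeClosure,
		CommitmentTxConfirmed,
		ProcessingError { err: String },
		DisconnectedPeer,
		OutdatedChannelManager,
	}

	enum Event {
		ChannelClosed { channel_id: [u8; 32], reason: ClosureReason },
		// The never-broadcast funding transaction; a raw byte vector stands in for
		// `bitcoin::Transaction` to keep this sketch dependency-free.
		DiscardFunding { channel_id: [u8; 32], transaction: Vec<u8> },
	}

	fn hex(bytes: &[u8]) -> String {
		bytes.iter().map(|b| format!("{:02x}", b)).collect()
	}

	fn handle_event(event: &Event) {
		match event {
			Event::ChannelClosed { channel_id, reason } => {
				// Cooperative and holder-initiated closes are expected; the remaining
				// reasons mean the counterparty force-closed, the commitment transaction
				// confirmed on-chain, or an error forced the channel shut.
				let expected = matches!(reason,
					ClosureReason::CooperativeClosure | ClosureReason::HolderForceClosed);
				println!("channel {} closed ({}): {:?}",
					hex(channel_id), if expected { "expected" } else { "unexpected" }, reason);
			},
			Event::DiscardFunding { channel_id, transaction } => {
				println!("channel {} was never broadcast; dropping {}-byte funding tx",
					hex(channel_id), transaction.len());
			},
		}
	}

	fn main() {
		handle_event(&Event::ChannelClosed {
			channel_id: [0x11; 32],
			reason: ClosureReason::CounterpartyForceClosed { peer_msg: "internal error".to_owned() },
		});
		handle_event(&Event::DiscardFunding { channel_id: [0x22; 32], transaction: vec![0u8; 110] });
	}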
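
The core data-structure change is that `pending_outbound_payments` becomes a `Mutex<HashMap<MppId, HashSet<[u8; 32]>>>`: every path of an outbound payment is grouped under the payment's `MppId`, with each path identified by its onion session private key. That grouping is what lets the failure handlers report `all_paths_failed` in `PaymentPathFailed` and lets the claim path emit a single `PaymentSent` per payment. The sketch below mirrors that bookkeeping in isolation; `OutboundPayments`, `register_path`, `fail_path`, and `claim` are illustrative names for this sketch, not rust-lightning API, and the `MppId` stand-in mirrors the crate-private newtype added here.

	use std::collections::{HashMap, HashSet};

	// Local stand-in for the crate-private MppId newtype added by this change.
	#[derive(Clone, Copy, PartialEq, Eq, Hash)]
	struct MppId([u8; 32]);

	struct OutboundPayments {
		// Mirrors the new field type: HashMap<MppId, HashSet<[u8; 32]>> behind a Mutex in the patch.
		pending: HashMap<MppId, HashSet<[u8; 32]>>,
	}

	impl OutboundPayments {
		fn new() -> Self { Self { pending: HashMap::new() } }

		// Mirrors send-time bookkeeping: each path's onion session key is inserted
		// under the payment's MppId; inserting the same key twice would be a bug.
		fn register_path(&mut self, mpp_id: MppId, session_priv: [u8; 32]) {
			let sessions = self.pending.entry(mpp_id).or_insert_with(HashSet::new);
			assert!(sessions.insert(session_priv));
		}

		// Mirrors the failure paths: Some(all_paths_failed) if this pair was still
		// pending, None for a duplicative fail (which generates no event).
		fn fail_path(&mut self, mpp_id: MppId, session_priv: &[u8; 32]) -> Option<bool> {
			let sessions = self.pending.get_mut(&mpp_id)?;
			if !sessions.remove(session_priv) {
				return None;
			}
			let all_paths_failed = sessions.is_empty();
			if all_paths_failed {
				self.pending.remove(&mpp_id);
			}
			Some(all_paths_failed)
		}

		// Mirrors claim_funds_internal: the first fulfilled path removes the whole
		// payment, so only one PaymentSent event is generated per payment.
		fn claim(&mut self, mpp_id: MppId, session_priv: &[u8; 32]) -> bool {
			match self.pending.remove(&mpp_id) {
				Some(sessions) => sessions.contains(session_priv),
				None => false,
			}
		}
	}

	fn main() {
		let mut outbounds = OutboundPayments::new();

		// A two-path MPP payment: both paths share one MppId.
		let mpp_id = MppId([1; 32]);
		outbounds.register_path(mpp_id, [2; 32]);
		outbounds.register_path(mpp_id, [3; 32]);

		// First path fails: PaymentPathFailed would carry all_paths_failed = false.
		assert_eq!(outbounds.fail_path(mpp_id, &[2; 32]), Some(false));
		// Last path fails: all_paths_failed = true and the entry is cleaned up.
		assert_eq!(outbounds.fail_path(mpp_id, &[3; 32]), Some(true));
		// A duplicative fail is ignored, matching the log-only branch in the patch.
		assert_eq!(outbounds.fail_path(mpp_id, &[3; 32]), None);

		// A successful payment: the first fulfilled path removes the whole entry, so a
		// second fulfill for the same payment is treated as duplicative.
		let paid = MppId([9; 32]);
		outbounds.register_path(paid, [4; 32]);
		assert!(outbounds.claim(paid, &[4; 32]));
		assert!(!outbounds.claim(paid, &[4; 32]));
	}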