X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=bd5b8e11212397bc7321f6425c71a643128fbc2a;hb=a5b7cf2c69f2ac05279335006b1b172e2f0cb8c9;hp=94bbcf2ae678eb96693ec7a01c1ff4d0a2037bb4;hpb=645e056a07d16608209f9b43466d088c7c42f53e;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 94bbcf2a..bd5b8e11 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -181,6 +181,7 @@ pub(super) enum HTLCForwardInfo {
 pub(crate) struct HTLCPreviousHopData {
 	// Note that this may be an outbound SCID alias for the associated channel.
 	short_channel_id: u64,
+	user_channel_id: Option<u128>,
 	htlc_id: u64,
 	incoming_packet_shared_secret: [u8; 32],
 	phantom_shared_secret: Option<[u8; 32]>,
@@ -221,6 +222,17 @@ struct ClaimableHTLC {
 	counterparty_skimmed_fee_msat: Option<u64>,
 }
 
+impl From<&ClaimableHTLC> for events::ClaimedHTLC {
+	fn from(val: &ClaimableHTLC) -> Self {
+		events::ClaimedHTLC {
+			channel_id: val.prev_hop.outpoint.to_channel_id(),
+			user_channel_id: val.prev_hop.user_channel_id.unwrap_or(0),
+			cltv_expiry: val.cltv_expiry,
+			value_msat: val.value,
+		}
+	}
+}
+
 /// A payment identifier used to uniquely identify a payment to LDK.
 ///
 /// This is not exported to bindings users as we just use [u8; 32] directly
@@ -496,11 +508,15 @@ struct ClaimingPayment {
 	amount_msat: u64,
 	payment_purpose: events::PaymentPurpose,
 	receiver_node_id: PublicKey,
+	htlcs: Vec<events::ClaimedHTLC>,
+	sender_intended_value: Option<u64>,
 }
 impl_writeable_tlv_based!(ClaimingPayment, {
 	(0, amount_msat, required),
 	(2, payment_purpose, required),
 	(4, receiver_node_id, required),
+	(5, htlcs, optional_vec),
+	(7, sender_intended_value, option),
 });
 
 struct ClaimablePayment {
@@ -660,6 +676,13 @@ pub(super) struct PeerState {
 	/// been assigned a `channel_id`, the entry in this map is removed and one is created in
 	/// `channel_by_id`.
 	pub(super) inbound_v1_channel_by_id: HashMap<[u8; 32], InboundV1Channel>,
+	/// `temporary_channel_id` -> `InboundChannelRequest`.
+	///
+	/// When manual channel acceptance is enabled, this holds all unaccepted inbound channels where
+	/// the peer is the counterparty. If the channel is accepted, then the entry in this table is
+	/// removed, and an InboundV1Channel is created and placed in the `inbound_v1_channel_by_id` table. If
+	/// the channel is rejected, then the entry is simply removed.
+	pub(super) inbound_channel_request_by_id: HashMap<[u8; 32], InboundChannelRequest>,
 	/// The latest `InitFeatures` we heard from the peer.
 	latest_features: InitFeatures,
 	/// Messages to send to the peer - pushed to in the same lock that they are generated in (except
@@ -714,17 +737,32 @@ impl PeerState {
 	fn total_channel_count(&self) -> usize {
 		self.channel_by_id.len() +
 			self.outbound_v1_channel_by_id.len() +
-			self.inbound_v1_channel_by_id.len()
+			self.inbound_v1_channel_by_id.len() +
+			self.inbound_channel_request_by_id.len()
 	}
 
 	// Returns a bool indicating if the given `channel_id` matches a channel we have with this peer.
 	fn has_channel(&self, channel_id: &[u8; 32]) -> bool {
 		self.channel_by_id.contains_key(channel_id) ||
 			self.outbound_v1_channel_by_id.contains_key(channel_id) ||
-			self.inbound_v1_channel_by_id.contains_key(channel_id)
+			self.inbound_v1_channel_by_id.contains_key(channel_id) ||
+			self.inbound_channel_request_by_id.contains_key(channel_id)
 	}
 }
 
+/// A not-yet-accepted inbound (from counterparty) channel. Once
+/// accepted, the parameters will be used to construct a channel.
+pub(super) struct InboundChannelRequest { + /// The original OpenChannel message. + pub open_channel_msg: msgs::OpenChannel, + /// The number of ticks remaining before the request expires. + pub ticks_remaining: i32, +} + +/// The number of ticks that may elapse while we're waiting for an unaccepted inbound channel to be +/// accepted. An unaccepted channel that exceeds this limit will be abandoned. +const UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS: i32 = 2; + /// Stores a PaymentSecret and any other data we may need to validate an inbound payment is /// actually ours and not some duplicate HTLC sent to us by a node along the route. /// @@ -1122,7 +1160,11 @@ where /// could be in the middle of being processed without the direct mutex held. /// /// See `ChannelManager` struct-level documentation for lock order requirements. + #[cfg(not(any(test, feature = "_test_utils")))] pending_events: Mutex)>>, + #[cfg(any(test, feature = "_test_utils"))] + pub(crate) pending_events: Mutex)>>, + /// A simple atomic flag to ensure only one task at a time can be processing events asynchronously. pending_events_processor: AtomicBool, @@ -1429,9 +1471,15 @@ pub struct ChannelDetails { /// /// [`outbound_capacity_msat`]: ChannelDetails::outbound_capacity_msat pub unspendable_punishment_reserve: Option, - /// The `user_channel_id` passed in to create_channel, or a random value if the channel was - /// inbound. This may be zero for inbound channels serialized with LDK versions prior to - /// 0.0.113. + /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound + /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if + /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise + /// `user_channel_id` will be randomized for an inbound channel. This may be zero for objects + /// serialized with LDK versions prior to 0.0.113. + /// + /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel + /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel + /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels pub user_channel_id: u128, /// The currently negotiated fee rate denominated in satoshi per 1000 weight units, /// which is applied to commitment and HTLC transactions. @@ -2580,6 +2628,12 @@ where self.finish_force_close_channel(chan.context.force_shutdown(false)); // Unfunded channel has no update (None, chan.context.get_counterparty_node_id()) + } else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() { + log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..])); + // N.B. that we don't send any channel close event here: we + // don't have a user_channel_id, and we never sent any opening + // events anyway. + (None, *peer_node_id) } else { return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), peer_node_id) }); } @@ -2860,9 +2914,9 @@ where short_channel_id, amt_to_forward, outgoing_cltv_value }, .. 
} => { - let next_pk = onion_utils::next_hop_packet_pubkey(&self.secp_ctx, + let next_packet_pk = onion_utils::next_hop_pubkey(&self.secp_ctx, msg.onion_routing_packet.public_key.unwrap(), &shared_secret); - (short_channel_id, amt_to_forward, outgoing_cltv_value, Some(next_pk)) + (short_channel_id, amt_to_forward, outgoing_cltv_value, Some(next_packet_pk)) }, // We'll do receive checks in [`Self::construct_pending_htlc_info`] so we have access to the // inbound channel's state. @@ -3747,6 +3801,7 @@ where if let PendingHTLCRouting::Forward { short_channel_id, .. } = payment.forward_info.routing { let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: payment.prev_short_channel_id, + user_channel_id: Some(payment.prev_user_channel_id), outpoint: payment.prev_funding_outpoint, htlc_id: payment.prev_htlc_id, incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret, @@ -3794,6 +3849,7 @@ where let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: prev_short_channel_id, + user_channel_id: Some(prev_user_channel_id), outpoint: prev_funding_outpoint, htlc_id: prev_htlc_id, incoming_packet_shared_secret: incoming_shared_secret, @@ -3898,7 +3954,7 @@ where for forward_info in pending_forwards.drain(..) { match forward_info { HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { - prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id: _, + prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id, forward_info: PendingHTLCInfo { incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, routing: PendingHTLCRouting::Forward { onion_packet, .. }, skimmed_fee_msat, .. @@ -3907,6 +3963,7 @@ where log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id); let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: prev_short_channel_id, + user_channel_id: Some(prev_user_channel_id), outpoint: prev_funding_outpoint, htlc_id: prev_htlc_id, incoming_packet_shared_secret: incoming_shared_secret, @@ -3988,6 +4045,7 @@ where let claimable_htlc = ClaimableHTLC { prev_hop: HTLCPreviousHopData { short_channel_id: prev_short_channel_id, + user_channel_id: Some(prev_user_channel_id), outpoint: prev_funding_outpoint, htlc_id: prev_htlc_id, incoming_packet_shared_secret: incoming_shared_secret, @@ -4017,6 +4075,7 @@ where ); failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: $htlc.prev_hop.short_channel_id, + user_channel_id: $htlc.prev_hop.user_channel_id, outpoint: prev_funding_outpoint, htlc_id: $htlc.prev_hop.htlc_id, incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret, @@ -4493,6 +4552,21 @@ where peer_state.inbound_v1_channel_by_id.retain(|chan_id, chan| process_unfunded_channel_tick( chan_id, &mut chan.context, &mut chan.unfunded_context, pending_msg_events)); + for (chan_id, req) in peer_state.inbound_channel_request_by_id.iter_mut() { + if { req.ticks_remaining -= 1 ; req.ticks_remaining } <= 0 { + log_error!(self.logger, "Force-closing unaccepted inbound channel {} for not accepting in a timely manner", log_bytes!(&chan_id[..])); + peer_state.pending_msg_events.push( + events::MessageSendEvent::HandleError { + node_id: counterparty_node_id, + action: msgs::ErrorAction::SendErrorMessage { + msg: msgs::ErrorMessage { channel_id: chan_id.clone(), data: "Channel 
force-closed".to_owned() } + }, + } + ); + } + } + peer_state.inbound_channel_request_by_id.retain(|_, req| req.ticks_remaining > 0); + if peer_state.ok_to_remove(true) { pending_peers_awaiting_removal.push(counterparty_node_id); } @@ -4733,7 +4807,7 @@ where &self.pending_events, &self.logger) { self.push_pending_forwards_ev(); } }, - HTLCSource::PreviousHopData(HTLCPreviousHopData { ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret, ref phantom_shared_secret, ref outpoint }) => { + HTLCSource::PreviousHopData(HTLCPreviousHopData { ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret, ref phantom_shared_secret, ref outpoint, .. }) => { log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with {:?}", log_bytes!(payment_hash.0), onion_error); let err_packet = onion_error.get_encrypted_failure_packet(incoming_packet_shared_secret, phantom_shared_secret); @@ -4820,9 +4894,11 @@ where } } + let htlcs = payment.htlcs.iter().map(events::ClaimedHTLC::from).collect(); + let sender_intended_value = payment.htlcs.first().map(|htlc| htlc.total_msat); let dup_purpose = claimable_payments.pending_claiming_payments.insert(payment_hash, ClaimingPayment { amount_msat: payment.htlcs.iter().map(|source| source.value).sum(), - payment_purpose: payment.purpose, receiver_node_id, + payment_purpose: payment.purpose, receiver_node_id, htlcs, sender_intended_value }); if dup_purpose.is_some() { debug_assert!(false, "Shouldn't get a duplicate pending claim event ever"); @@ -5034,12 +5110,18 @@ where self.pending_outbound_payments.finalize_claims(sources, &self.pending_events); } - fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option, from_onchain: bool, next_channel_id: [u8; 32]) { + fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option, from_onchain: bool, next_channel_outpoint: OutPoint) { match source { HTLCSource::OutboundRoute { session_priv, payment_id, path, .. 
} => { debug_assert!(self.background_events_processed_since_startup.load(Ordering::Acquire), "We don't support claim_htlc claims during startup - monitors may not be available yet"); - self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage, session_priv, path, from_onchain, &self.pending_events, &self.logger); + let ev_completion_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate { + channel_funding_outpoint: next_channel_outpoint, + counterparty_node_id: path.hops[0].pubkey, + }; + self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage, + session_priv, path, from_onchain, ev_completion_action, &self.pending_events, + &self.logger); }, HTLCSource::PreviousHopData(hop_data) => { let prev_outpoint = hop_data.outpoint; @@ -5055,7 +5137,7 @@ where fee_earned_msat, claim_from_onchain_tx: from_onchain, prev_channel_id: Some(prev_outpoint.to_channel_id()), - next_channel_id: Some(next_channel_id), + next_channel_id: Some(next_channel_outpoint.to_channel_id()), outbound_amount_forwarded_msat: forwarded_htlc_value_msat, }, downstream_counterparty_and_funding_outpoint: None, @@ -5080,9 +5162,20 @@ where match action { MonitorUpdateCompletionAction::PaymentClaimed { payment_hash } => { let payment = self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash); - if let Some(ClaimingPayment { amount_msat, payment_purpose: purpose, receiver_node_id }) = payment { + if let Some(ClaimingPayment { + amount_msat, + payment_purpose: purpose, + receiver_node_id, + htlcs, + sender_intended_value: sender_intended_total_msat, + }) = payment { self.pending_events.lock().unwrap().push_back((events::Event::PaymentClaimed { - payment_hash, purpose, amount_msat, receiver_node_id: Some(receiver_node_id), + payment_hash, + purpose, + amount_msat, + receiver_node_id: Some(receiver_node_id), + htlcs, + sender_intended_total_msat, }, None)); } }, @@ -5272,49 +5365,61 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; let is_only_peer_channel = peer_state.total_channel_count() == 1; - match peer_state.inbound_v1_channel_by_id.entry(temporary_channel_id.clone()) { - hash_map::Entry::Occupied(mut channel) => { - if !channel.get().is_awaiting_accept() { - return Err(APIError::APIMisuseError { err: "The channel isn't currently awaiting to be accepted.".to_owned() }); + + // Find (and remove) the channel in the unaccepted table. If it's not there, something weird is + // happening and return an error. N.B. that we create channel with an outbound SCID of zero so + // that we can delay allocating the SCID until after we're sure that the checks below will + // succeed. + let mut channel = match peer_state.inbound_channel_request_by_id.remove(temporary_channel_id) { + Some(unaccepted_channel) => { + let best_block_height = self.best_block.read().unwrap().height(); + InboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider, + counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features, + &unaccepted_channel.open_channel_msg, user_channel_id, &self.default_configuration, best_block_height, + &self.logger, accept_0conf).map_err(|e| APIError::ChannelUnavailable { err: e.to_string() }) + } + _ => Err(APIError::APIMisuseError { err: "No such channel awaiting to be accepted.".to_owned() }) + }?; + + if accept_0conf { + // This should have been correctly configured by the call to InboundV1Channel::new. 
+ debug_assert!(channel.context.minimum_depth().unwrap() == 0); + } else if channel.context.get_channel_type().requires_zero_conf() { + let send_msg_err_event = events::MessageSendEvent::HandleError { + node_id: channel.context.get_counterparty_node_id(), + action: msgs::ErrorAction::SendErrorMessage{ + msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), } } - if accept_0conf { - channel.get_mut().set_0conf(); - } else if channel.get().context.get_channel_type().requires_zero_conf() { - let send_msg_err_event = events::MessageSendEvent::HandleError { - node_id: channel.get().context.get_counterparty_node_id(), - action: msgs::ErrorAction::SendErrorMessage{ - msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), } - } - }; - peer_state.pending_msg_events.push(send_msg_err_event); - let _ = remove_channel!(self, channel); - return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() }); - } else { - // If this peer already has some channels, a new channel won't increase our number of peers - // with unfunded channels, so as long as we aren't over the maximum number of unfunded - // channels per-peer we can accept channels from a peer with existing ones. - if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS { - let send_msg_err_event = events::MessageSendEvent::HandleError { - node_id: channel.get().context.get_counterparty_node_id(), - action: msgs::ErrorAction::SendErrorMessage{ - msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), } - } - }; - peer_state.pending_msg_events.push(send_msg_err_event); - let _ = remove_channel!(self, channel); - return Err(APIError::APIMisuseError { err: "Too many peers with unfunded channels, refusing to accept new ones".to_owned() }); + }; + peer_state.pending_msg_events.push(send_msg_err_event); + return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() }); + } else { + // If this peer already has some channels, a new channel won't increase our number of peers + // with unfunded channels, so as long as we aren't over the maximum number of unfunded + // channels per-peer we can accept channels from a peer with existing ones. 
+ if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS { + let send_msg_err_event = events::MessageSendEvent::HandleError { + node_id: channel.context.get_counterparty_node_id(), + action: msgs::ErrorAction::SendErrorMessage{ + msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), } } - } - - peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { - node_id: channel.get().context.get_counterparty_node_id(), - msg: channel.get_mut().accept_inbound_channel(user_channel_id), - }); - } - hash_map::Entry::Vacant(_) => { - return Err(APIError::ChannelUnavailable { err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*temporary_channel_id), counterparty_node_id) }); + }; + peer_state.pending_msg_events.push(send_msg_err_event); + return Err(APIError::APIMisuseError { err: "Too many peers with unfunded channels, refusing to accept new ones".to_owned() }); } } + + // Now that we know we have a channel, assign an outbound SCID alias. + let outbound_scid_alias = self.create_and_insert_outbound_scid_alias(); + channel.context.set_outbound_scid_alias(outbound_scid_alias); + + peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { + node_id: channel.context.get_counterparty_node_id(), + msg: channel.accept_inbound_channel(), + }); + + peer_state.inbound_v1_channel_by_id.insert(temporary_channel_id.clone(), channel); + Ok(()) } @@ -5359,7 +5464,7 @@ where num_unfunded_channels += 1; } } - num_unfunded_channels + num_unfunded_channels + peer.inbound_channel_request_by_id.len() } fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> { @@ -5371,11 +5476,6 @@ where return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(), msg.temporary_channel_id.clone())); } - let mut random_bytes = [0u8; 16]; - random_bytes.copy_from_slice(&self.entropy_source.get_secure_random_bytes()[..16]); - let user_channel_id = u128::from_be_bytes(random_bytes); - let outbound_scid_alias = self.create_and_insert_outbound_scid_alias(); - // Get the number of peers with channels, but without funded ones. We don't care too much // about peers that never open a channel, so we filter by peers that have at least one // channel, and then limit the number of those with unfunded channels. @@ -5410,46 +5510,59 @@ where msg.temporary_channel_id.clone())); } + let channel_id = msg.temporary_channel_id; + let channel_exists = peer_state.has_channel(&channel_id); + if channel_exists { + return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone())); + } + + // If we're doing manual acceptance checks on the channel, then defer creation until we're sure we want to accept. 
+ if self.default_configuration.manually_accept_inbound_channels { + let mut pending_events = self.pending_events.lock().unwrap(); + pending_events.push_back((events::Event::OpenChannelRequest { + temporary_channel_id: msg.temporary_channel_id.clone(), + counterparty_node_id: counterparty_node_id.clone(), + funding_satoshis: msg.funding_satoshis, + push_msat: msg.push_msat, + channel_type: msg.channel_type.clone().unwrap(), + }, None)); + peer_state.inbound_channel_request_by_id.insert(channel_id, InboundChannelRequest { + open_channel_msg: msg.clone(), + ticks_remaining: UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS, + }); + return Ok(()); + } + + // Otherwise create the channel right now. + let mut random_bytes = [0u8; 16]; + random_bytes.copy_from_slice(&self.entropy_source.get_secure_random_bytes()[..16]); + let user_channel_id = u128::from_be_bytes(random_bytes); let mut channel = match InboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider, counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id, - &self.default_configuration, best_block_height, &self.logger, outbound_scid_alias) + &self.default_configuration, best_block_height, &self.logger, /*is_0conf=*/false) { Err(e) => { - self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias); return Err(MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id)); }, Ok(res) => res }; - let channel_id = channel.context.channel_id(); - let channel_exists = peer_state.has_channel(&channel_id); - if channel_exists { - self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias); - return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone())) - } else { - if !self.default_configuration.manually_accept_inbound_channels { - let channel_type = channel.context.get_channel_type(); - if channel_type.requires_zero_conf() { - return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone())); - } - if channel_type.requires_anchors_zero_fee_htlc_tx() { - return Err(MsgHandleErrInternal::send_err_msg_no_close("No channels with anchor outputs accepted".to_owned(), msg.temporary_channel_id.clone())); - } - peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { - node_id: counterparty_node_id.clone(), - msg: channel.accept_inbound_channel(user_channel_id), - }); - } else { - let mut pending_events = self.pending_events.lock().unwrap(); - pending_events.push_back((events::Event::OpenChannelRequest { - temporary_channel_id: msg.temporary_channel_id.clone(), - counterparty_node_id: counterparty_node_id.clone(), - funding_satoshis: msg.funding_satoshis, - push_msat: msg.push_msat, - channel_type: channel.context.get_channel_type().clone(), - }, None)); - } - peer_state.inbound_v1_channel_by_id.insert(channel_id, channel); + + let channel_type = channel.context.get_channel_type(); + if channel_type.requires_zero_conf() { + return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone())); } + if channel_type.requires_anchors_zero_fee_htlc_tx() { + return Err(MsgHandleErrInternal::send_err_msg_no_close("No channels with anchor outputs accepted".to_owned(), msg.temporary_channel_id.clone())); + } + + let outbound_scid_alias = self.create_and_insert_outbound_scid_alias(); + 
channel.context.set_outbound_scid_alias(outbound_scid_alias); + + peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel { + node_id: counterparty_node_id.clone(), + msg: channel.accept_inbound_channel(), + }); + peer_state.inbound_v1_channel_by_id.insert(channel_id, channel); Ok(()) } @@ -5810,6 +5923,7 @@ where } fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> { + let funding_txo; let (htlc_source, forwarded_htlc_value) = { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) @@ -5821,12 +5935,14 @@ where let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { - try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), chan) + let res = try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), chan); + funding_txo = chan.get().context.get_funding_txo().expect("We won't accept a fulfill until funded"); + res }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } }; - self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, msg.channel_id); + self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, funding_txo); Ok(()) } @@ -5937,6 +6053,7 @@ where log_info!(self.logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid); let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: prev_short_channel_id, + user_channel_id: Some(prev_user_channel_id), outpoint: prev_funding_outpoint, htlc_id: prev_htlc_id, incoming_packet_shared_secret: forward_info.incoming_shared_secret, @@ -6023,10 +6140,18 @@ where let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { - let funding_txo = chan.get().context.get_funding_txo(); - let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger), chan); + let funding_txo_opt = chan.get().context.get_funding_txo(); + let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt { + self.raa_monitor_updates_held( + &peer_state.actions_blocking_raa_monitor_updates, funding_txo, + *counterparty_node_id) + } else { false }; + let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self, + chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger, mon_update_blocked), chan); let res = if let Some(monitor_update) = monitor_update_opt { - handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, + let funding_txo = funding_txo_opt + .expect("Funding outpoint must have been set for RAA handling to succeed"); + handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan).map(|_| ()) } else { Ok(()) }; (htlcs_to_fail, res) @@ -6201,7 +6326,7 @@ where MonitorEvent::HTLCEvent(htlc_update) => { if let Some(preimage) = htlc_update.payment_preimage { log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0)); - self.claim_funds_internal(htlc_update.source, preimage, 
htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id()); + self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint); } else { log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0)); let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() }; @@ -7055,6 +7180,7 @@ where if height >= htlc.forward_info.outgoing_cltv_value - HTLC_FAIL_BACK_BUFFER { let prev_hop_data = HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: htlc.prev_short_channel_id, + user_channel_id: Some(htlc.prev_user_channel_id), htlc_id: htlc.prev_htlc_id, incoming_packet_shared_secret: htlc.forward_info.incoming_shared_secret, phantom_shared_secret: None, @@ -7281,6 +7407,9 @@ where self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer); false }); + // Note that we don't bother generating any events for pre-accept channels - + // they're not considered "channels" yet from the PoV of our events interface. + peer_state.inbound_channel_request_by_id.clear(); pending_msg_events.retain(|msg| { match msg { // V1 Channel Establishment @@ -7364,6 +7493,7 @@ where channel_by_id: HashMap::new(), outbound_v1_channel_by_id: HashMap::new(), inbound_v1_channel_by_id: HashMap::new(), + inbound_channel_request_by_id: HashMap::new(), latest_features: init_msg.features.clone(), pending_msg_events: Vec::new(), in_flight_monitor_updates: BTreeMap::new(), @@ -7416,6 +7546,46 @@ where fn handle_error(&self, counterparty_node_id: &PublicKey, msg: &msgs::ErrorMessage) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); + match &msg.data as &str { + "cannot co-op close channel w/ active htlcs"| + "link failed to shutdown" => + { + // LND hasn't properly handled shutdown messages ever, and force-closes any time we + // send one while HTLCs are still present. The issue is tracked at + // https://github.com/lightningnetwork/lnd/issues/6039 and has had multiple patches + // to fix it but none so far have managed to land upstream. The issue appears to be + // very low priority for the LND team despite being marked "P1". + // We're not going to bother handling this in a sensible way, instead simply + // repeating the Shutdown message on repeat until morale improves. 
+ if msg.channel_id != [0; 32] { + let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id); + if peer_state_mutex_opt.is_none() { return; } + let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap(); + if let Some(chan) = peer_state.channel_by_id.get(&msg.channel_id) { + if let Some(msg) = chan.get_outbound_shutdown() { + peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { + node_id: *counterparty_node_id, + msg, + }); + } + peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError { + node_id: *counterparty_node_id, + action: msgs::ErrorAction::SendWarningMessage { + msg: msgs::WarningMessage { + channel_id: msg.channel_id, + data: "You appear to be exhibiting LND bug 6039, we'll keep sending you shutdown messages until you handle them correctly".to_owned() + }, + log_level: Level::Trace, + } + }); + } + } + return; + } + _ => {} + } + if msg.channel_id == [0; 32] { let channel_ids: Vec<[u8; 32]> = { let per_peer_state = self.per_peer_state.read().unwrap(); @@ -7423,6 +7593,9 @@ where if peer_state_mutex_opt.is_none() { return; } let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); let peer_state = &mut *peer_state_lock; + // Note that we don't bother generating any events for pre-accept channels - + // they're not considered "channels" yet from the PoV of our events interface. + peer_state.inbound_channel_request_by_id.clear(); peer_state.channel_by_id.keys().cloned() .chain(peer_state.outbound_v1_channel_by_id.keys().cloned()) .chain(peer_state.inbound_v1_channel_by_id.keys().cloned()).collect() @@ -7823,7 +7996,8 @@ impl_writeable_tlv_based!(HTLCPreviousHopData, { (1, phantom_shared_secret, option), (2, outpoint, required), (4, htlc_id, required), - (6, incoming_packet_shared_secret, required) + (6, incoming_packet_shared_secret, required), + (7, user_channel_id, option), }); impl Writeable for ClaimableHTLC { @@ -8567,6 +8741,7 @@ where channel_by_id, outbound_v1_channel_by_id: HashMap::new(), inbound_v1_channel_by_id: HashMap::new(), + inbound_channel_request_by_id: HashMap::new(), latest_features: InitFeatures::empty(), pending_msg_events: Vec::new(), in_flight_monitor_updates: BTreeMap::new(), @@ -8902,7 +9077,13 @@ where // generating a `PaymentPathSuccessful` event but regenerating // it and the `PaymentSent` on every restart until the // `ChannelMonitor` is removed. - pending_outbounds.claim_htlc(payment_id, preimage, session_priv, path, false, &pending_events, &args.logger); + let compl_action = + EventCompletionAction::ReleaseRAAChannelMonitorUpdate { + channel_funding_outpoint: monitor.get_funding_txo().0, + counterparty_node_id: path.hops[0].pubkey, + }; + pending_outbounds.claim_htlc(payment_id, preimage, session_priv, + path, false, compl_action, &pending_events, &args.logger); pending_events_read = pending_events.into_inner().unwrap(); } }, @@ -8923,7 +9104,7 @@ where // downstream chan is closed (because we don't have a // channel_id -> peer map entry). 
counterparty_opt.is_none(), - monitor.get_funding_txo().0.to_channel_id())) + monitor.get_funding_txo().0)) } else { None } } else { // If it was an outbound payment, we've handled it above - if a preimage @@ -9070,7 +9251,7 @@ where .expect("Failed to get node_id for phantom node recipient"); receiver_node_id = Some(phantom_pubkey) } - for claimable_htlc in payment.htlcs { + for claimable_htlc in &payment.htlcs { claimable_amt_msat += claimable_htlc.value; // Add a holding-cell claim of the payment to the Channel, which should be @@ -9106,6 +9287,8 @@ where payment_hash, purpose: payment.purpose, amount_msat: claimable_amt_msat, + htlcs: payment.htlcs.iter().map(events::ClaimedHTLC::from).collect(), + sender_intended_total_msat: payment.htlcs.first().map(|htlc| htlc.total_msat), }, None)); } } @@ -9192,12 +9375,12 @@ where channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } - for (source, preimage, downstream_value, downstream_closed, downstream_chan_id) in pending_claims_to_replay { + for (source, preimage, downstream_value, downstream_closed, downstream_funding) in pending_claims_to_replay { // We use `downstream_closed` in place of `from_onchain` here just as a guess - we // don't remember in the `ChannelMonitor` where we got a preimage from, but if the // channel is closed we just assume that it probably came from an on-chain claim. channel_manager.claim_funds_internal(source, preimage, Some(downstream_value), - downstream_closed, downstream_chan_id); + downstream_closed, downstream_funding); } //TODO: Broadcast channel update for closed channels, but only after we've made a @@ -9375,6 +9558,7 @@ mod tests { let bs_first_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_first_updates.update_fulfill_htlcs[0]); + expect_payment_sent(&nodes[0], payment_preimage, None, false, false); nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_updates.commitment_signed); check_added_monitors!(nodes[0], 1); let (as_first_raa, as_first_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); @@ -9402,16 +9586,8 @@ mod tests { // Note that successful MPP payments will generate a single PaymentSent event upon the first // path's success and a PaymentPathSuccessful event for each path's success. let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 3); + assert_eq!(events.len(), 2); match events[0] { - Event::PaymentSent { payment_id: ref id, payment_preimage: ref preimage, payment_hash: ref hash, .. 
} => { - assert_eq!(Some(payment_id), *id); - assert_eq!(payment_preimage, *preimage); - assert_eq!(our_payment_hash, *hash); - }, - _ => panic!("Unexpected event"), - } - match events[1] { Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => { assert_eq!(payment_id, *actual_payment_id); assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap()); @@ -9419,7 +9595,7 @@ mod tests { }, _ => panic!("Unexpected event"), } - match events[2] { + match events[1] { Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => { assert_eq!(payment_id, *actual_payment_id); assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap()); @@ -10287,7 +10463,9 @@ mod tests { let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); assert!(!open_channel_msg.channel_type.unwrap().supports_anchors_zero_fee_htlc_tx()); - check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000); + // Since nodes[1] should not have accepted the channel, it should + // not have generated any events. + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); } #[test]
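
The `inbound_channel_request_by_id` map and `UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS` introduced above defer building an `InboundV1Channel` until the user acts on the `OpenChannelRequest` event, and `timer_tick_occurred` ages each pending request so one that is never accepted gets rejected. Below is a minimal standalone sketch of that decrement-and-retain bookkeeping; the types and function here are simplified stand-ins for illustration, not the actual `PeerState`/`ChannelManager` code.

// Standalone sketch of the unaccepted-channel expiry bookkeeping (simplified
// stand-ins for InboundChannelRequest and the per-peer map; not the LDK types).
use std::collections::HashMap;

const UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS: i32 = 2;

struct InboundChannelRequest {
	ticks_remaining: i32,
}

// Called once per timer tick: age every pending request, report the ones that
// expired so an error message can be queued to the counterparty, then drop
// them from the map.
fn expire_unaccepted_channels(
	requests: &mut HashMap<[u8; 32], InboundChannelRequest>,
) -> Vec<[u8; 32]> {
	let mut expired = Vec::new();
	for (chan_id, req) in requests.iter_mut() {
		req.ticks_remaining -= 1;
		if req.ticks_remaining <= 0 {
			expired.push(*chan_id);
		}
	}
	requests.retain(|_, req| req.ticks_remaining > 0);
	expired
}

fn main() {
	let mut requests = HashMap::new();
	requests.insert([1u8; 32], InboundChannelRequest {
		ticks_remaining: UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS,
	});
	// After two ticks the request times out and would be rejected with an
	// error message to the counterparty.
	assert!(expire_unaccepted_channels(&mut requests).is_empty());
	assert_eq!(expire_unaccepted_channels(&mut requests), vec![[1u8; 32]]);
	assert!(requests.is_empty());
}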
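
The new `ClaimedHTLC` conversion and the `htlcs`/`sender_intended_value` fields added to `ClaimingPayment` exist so that `Event::PaymentClaimed` can report each claimed HTLC and the sender-intended total. The following is a minimal standalone sketch of how those fields are assembled from the claimable parts; the struct and function names are simplified stand-ins, not the LDK types.

// Standalone sketch (simplified stand-in types, not the LDK structs) of how the
// per-HTLC claim data is assembled for the PaymentClaimed event.
#[derive(Debug, Clone, PartialEq, Eq)]
struct ClaimedHtlc {
	channel_id: [u8; 32],
	user_channel_id: u128,
	cltv_expiry: u32,
	value_msat: u64,
}

struct ClaimableHtlc {
	channel_id: [u8; 32],
	// None when the hop data was serialized before the field existed.
	user_channel_id: Option<u128>,
	cltv_expiry: u32,
	value_msat: u64,
	// Sender-intended total across all MPP parts of the payment.
	total_msat: u64,
}

impl From<&ClaimableHtlc> for ClaimedHtlc {
	fn from(htlc: &ClaimableHtlc) -> Self {
		ClaimedHtlc {
			channel_id: htlc.channel_id,
			// As in the diff, a missing user_channel_id surfaces as 0.
			user_channel_id: htlc.user_channel_id.unwrap_or(0),
			cltv_expiry: htlc.cltv_expiry,
			value_msat: htlc.value_msat,
		}
	}
}

// One ClaimedHtlc per part, the claimed amount as the sum of the parts, and the
// sender-intended total taken from any one part's total_msat.
fn claimed_event_fields(parts: &[ClaimableHtlc]) -> (Vec<ClaimedHtlc>, u64, Option<u64>) {
	let htlcs: Vec<ClaimedHtlc> = parts.iter().map(ClaimedHtlc::from).collect();
	let amount_msat = parts.iter().map(|h| h.value_msat).sum();
	let sender_intended_total_msat = parts.first().map(|h| h.total_msat);
	(htlcs, amount_msat, sender_intended_total_msat)
}

fn main() {
	let parts = vec![
		ClaimableHtlc { channel_id: [1; 32], user_channel_id: Some(42), cltv_expiry: 500_000, value_msat: 60_000, total_msat: 100_000 },
		ClaimableHtlc { channel_id: [2; 32], user_channel_id: None, cltv_expiry: 500_001, value_msat: 40_000, total_msat: 100_000 },
	];
	let (htlcs, amount_msat, total) = claimed_event_fields(&parts);
	assert_eq!(htlcs.len(), 2);
	assert_eq!(htlcs[1].user_channel_id, 0);
	assert_eq!(amount_msat, 100_000);
	assert_eq!(total, Some(100_000));
}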
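
The `handle_error` special case above keys off the two literal error strings LND sends when it hits lightningnetwork/lnd#6039, and re-sends our `shutdown` plus a warning instead of treating the error as fatal. A standalone sketch of that string match follows; the helper and enum names are made up for illustration and are not part of the LDK API.

// Standalone sketch (made-up helper/enum names, purely illustrative) of the
// string match handle_error now performs for LND bug 6039.
enum ErrorResponse {
	// Re-queue the channel's outbound `shutdown` plus a warning message.
	ResendShutdown { warning: &'static str },
	// Fall through to the normal error handling.
	Default,
}

fn classify_peer_error(data: &str) -> ErrorResponse {
	match data {
		// The real code additionally checks for a non-zero channel_id before
		// queueing the shutdown/warning; either way a matching string never
		// falls through to the force-close handling.
		"cannot co-op close channel w/ active htlcs" | "link failed to shutdown" =>
			ErrorResponse::ResendShutdown {
				warning: "You appear to be exhibiting LND bug 6039, we'll keep sending you \
					shutdown messages until you handle them correctly",
			},
		_ => ErrorResponse::Default,
	}
}

fn main() {
	assert!(matches!(
		classify_peer_error("link failed to shutdown"),
		ErrorResponse::ResendShutdown { .. }
	));
	assert!(matches!(classify_peer_error("some other error"), ErrorResponse::Default));
}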