X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=d0badb8b2cc7ca8e0a3688740c2a24aefe2c5f6d;hb=a1c30041360905e5edcb343447cf1d5872188914;hp=7fe99607528f1bef92007199dc4c04417161c167;hpb=5eea3058c921aa4c0739dc10565ee8d236302274;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 7fe99607..d0badb8b 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -43,14 +43,12 @@ use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, Messa
 // Since this struct is returned in `list_channels` methods, expose it here in case users want to
 // construct one themselves.
 use crate::ln::{inbound_payment, ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
-use crate::ln::channel::{Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext};
+use crate::ln::channel::{self, Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext};
 use crate::ln::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
 #[cfg(any(feature = "_test_utils", test))]
 use crate::ln::features::Bolt11InvoiceFeatures;
-use crate::routing::gossip::NetworkGraph;
-use crate::routing::router::{BlindedTail, DefaultRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router};
-use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
-use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htlc_info, create_fwd_pending_htlc_info, decode_incoming_update_add_htlc_onion, InboundOnionErr, NextPacketDetails};
+use crate::routing::router::{BlindedTail, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router};
+use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htlc_info, create_fwd_pending_htlc_info, decode_incoming_update_add_htlc_onion, InboundHTLCErr, NextPacketDetails};
 use crate::ln::msgs;
 use crate::ln::onion_utils;
 use crate::ln::onion_utils::{HTLCFailReason, INVALID_ONION_BLINDING};
@@ -65,8 +63,9 @@ use crate::offers::merkle::SignError;
 use crate::offers::offer::{DerivedMetadata, Offer, OfferBuilder};
 use crate::offers::parse::Bolt12SemanticError;
 use crate::offers::refund::{Refund, RefundBuilder};
-use crate::onion_message::{Destination, MessageRouter, OffersMessage, OffersMessageHandler, PendingOnionMessage, new_pending_onion_message};
-use crate::sign::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider};
+use crate::onion_message::messenger::{Destination, MessageRouter, PendingOnionMessage, new_pending_onion_message};
+use crate::onion_message::offers::{OffersMessage, OffersMessageHandler};
+use crate::sign::{EntropySource, NodeSigner, Recipient, SignerProvider};
 use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
 use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
 use crate::util::wakers::{Future, Notifier};
@@ -75,6 +74,13 @@ use crate::util::string::UntrustedString;
 use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
 use crate::util::logger::{Level, Logger, WithContext};
 use crate::util::errors::APIError;
+#[cfg(not(c_bindings))]
+use {
+	crate::routing::router::DefaultRouter,
+	crate::routing::gossip::NetworkGraph,
+	crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters},
+	crate::sign::KeysManager,
+};
 
 use alloc::collections::{btree_map, BTreeMap};
 
@@ -2878,6 +2884,7 @@ where
 			reason: shutdown_res.closure_reason,
 			counterparty_node_id: Some(shutdown_res.counterparty_node_id),
 			channel_capacity_sats: Some(shutdown_res.channel_capacity_satoshis),
+			channel_funding_txo: shutdown_res.channel_funding_txo,
 		}, None));
 
 		if let Some(transaction) = shutdown_res.unbroadcasted_funding_tx {
@@ -3228,14 +3235,14 @@ where
 						// delay) once they've send us a commitment_signed!
 						PendingHTLCStatus::Forward(info)
 					},
-					Err(InboundOnionErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
+					Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
 				}
 			},
 			onion_utils::Hop::Forward { next_hop_data, next_hop_hmac, new_packet_bytes } => {
 				match create_fwd_pending_htlc_info(msg, next_hop_data, next_hop_hmac, new_packet_bytes,
 					shared_secret, next_packet_pubkey_opt) {
 					Ok(info) => PendingHTLCStatus::Forward(info),
-					Err(InboundOnionErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
+					Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
 				}
 			}
 		}
@@ -4303,7 +4310,7 @@ where
 											current_height, self.default_configuration.accept_mpp_keysend)
 										{
 											Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, vec![(info, prev_htlc_id)])),
-											Err(InboundOnionErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
+											Err(InboundHTLCErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
 										}
 									},
 									_ => panic!(),
@@ -4345,7 +4352,7 @@ where
 				if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
 					let logger = WithChannelContext::from(&self.logger, &chan.context);
 					for forward_info in pending_forwards.drain(..) {
-						match forward_info {
+						let queue_fail_htlc_res = match forward_info {
 							HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
 								prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
 								forward_info: PendingHTLCInfo {
@@ -4391,40 +4398,35 @@ where
 									));
 									continue;
 								}
+								None
 							},
 							HTLCForwardInfo::AddHTLC { .. } => {
 								panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
 							},
 							HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
 								log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
-								if let Err(e) = chan.queue_fail_htlc(
-									htlc_id, err_packet, &&logger
-								) {
-									if let ChannelError::Ignore(msg) = e {
-										log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
-									} else {
-										panic!("Stated return value requirements in queue_fail_htlc() were not met");
-									}
-									// fail-backs are best-effort, we probably already have one
-									// pending, and if not that's OK, if not, the channel is on
-									// the chain and sending the HTLC-Timeout is their problem.
-									continue;
-								}
+								Some((chan.queue_fail_htlc(htlc_id, err_packet, &&logger), htlc_id))
 							},
 							HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
-								log_trace!(self.logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
-								if let Err(e) = chan.queue_fail_malformed_htlc(htlc_id, failure_code, sha256_of_onion, &self.logger) {
-									if let ChannelError::Ignore(msg) = e {
-										log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
-									} else {
-										panic!("Stated return value requirements in queue_fail_malformed_htlc() were not met");
-									}
-									// fail-backs are best-effort, we probably already have one
-									// pending, and if not that's OK, if not, the channel is on
-									// the chain and sending the HTLC-Timeout is their problem.
-									continue;
-								}
+								log_trace!(logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
+								let res = chan.queue_fail_malformed_htlc(
+									htlc_id, failure_code, sha256_of_onion, &&logger
+								);
+								Some((res, htlc_id))
 							},
+						};
+						if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res {
+							if let Err(e) = queue_fail_htlc_res {
+								if let ChannelError::Ignore(msg) = e {
+									log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
+								} else {
+									panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met");
+								}
+								// fail-backs are best-effort, we probably already have one
+								// pending, and if not that's OK, if not, the channel is on
+								// the chain and sending the HTLC-Timeout is their problem.
+								continue;
+							}
+						}
 						}
 					}
 				} else {
@@ -5995,13 +5997,20 @@ where
 	}
 
 	fn do_accept_inbound_channel(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> {
+
+		let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*temporary_channel_id));
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
 		let peers_without_funded_channels =
 			self.peers_without_funded_channels(|peer| { peer.total_channel_count() > 0 });
 		let per_peer_state = self.per_peer_state.read().unwrap();
 		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
-			.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
+			.ok_or_else(|| {
+				let err_str = format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id);
+				log_error!(logger, "{}", err_str);
+
+				APIError::ChannelUnavailable { err: err_str }
+			})?;
 		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
 		let is_only_peer_channel = peer_state.total_channel_count() == 1;
@@ -6016,9 +6025,19 @@ where
 				InboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
 					counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features,
 					&unaccepted_channel.open_channel_msg, user_channel_id, &self.default_configuration, best_block_height,
-					&self.logger, accept_0conf).map_err(|e| APIError::ChannelUnavailable { err: e.to_string() })
+					&self.logger, accept_0conf).map_err(|e| {
+						let err_str = e.to_string();
+						log_error!(logger, "{}", err_str);
+
+						APIError::ChannelUnavailable { err: err_str }
+					})
+			}
+			_ => {
+				let err_str = "No such channel awaiting to be accepted.".to_owned();
+				log_error!(logger, "{}", err_str);
+
+				Err(APIError::APIMisuseError { err: err_str })
 			}
-			_ => Err(APIError::APIMisuseError { err: "No such channel awaiting to be accepted.".to_owned() })
 		}?;
 
 		if accept_0conf {
@@ -6032,7 +6051,10 @@ where
 				}
 			};
 			peer_state.pending_msg_events.push(send_msg_err_event);
-			return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() });
+			let err_str = "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned();
+			log_error!(logger, "{}", err_str);
+
+			return Err(APIError::APIMisuseError { err: err_str });
 		} else {
 			// If this peer already has some channels, a new channel won't increase our number of peers
 			// with unfunded channels, so as long as we aren't over the maximum number of unfunded
@@ -6045,7 +6067,10 @@ where
 					}
 				};
 				peer_state.pending_msg_events.push(send_msg_err_event);
-				return Err(APIError::APIMisuseError { err: "Too many peers with unfunded channels, refusing to accept new ones".to_owned() });
+				let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
+				log_error!(logger, "{}", err_str);
+
+				return Err(APIError::APIMisuseError { err: err_str });
 			}
 		}
 
@@ -6168,13 +6193,18 @@ where
 
 		// If we're doing manual acceptance checks on the channel, then defer creation until we're sure we want to accept.
 		if self.default_configuration.manually_accept_inbound_channels {
+			let channel_type = channel::channel_type_from_open_channel(
+					&msg, &peer_state.latest_features, &self.channel_type_features()
+				).map_err(|e|
+					MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id)
+				)?;
 			let mut pending_events = self.pending_events.lock().unwrap();
 			pending_events.push_back((events::Event::OpenChannelRequest {
 				temporary_channel_id: msg.temporary_channel_id.clone(),
 				counterparty_node_id: counterparty_node_id.clone(),
 				funding_satoshis: msg.funding_satoshis,
 				push_msat: msg.push_msat,
-				channel_type: msg.channel_type.clone().unwrap(),
+				channel_type,
 			}, None));
 			peer_state.inbound_channel_request_by_id.insert(channel_id, InboundChannelRequest {
 				open_channel_msg: msg.clone(),
@@ -6374,7 +6404,7 @@ where
 					let logger = WithChannelContext::from(&self.logger, &chan.context);
 					let res = chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger);
 					match res {
-						Ok((chan, monitor)) => {
+						Ok((mut chan, monitor)) => {
 							if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
 								// We really should be able to insert here without doing a second
 								// lookup, but sadly rust stdlib doesn't currently allow keeping
@@ -6386,6 +6416,11 @@ where
 								Ok(())
 							} else {
 								let e = ChannelError::Close("Channel funding outpoint was a duplicate".to_owned());
+								// We weren't able to watch the channel to begin with, so no
+								// updates should be made on it. Previously, full_stack_target
+								// found an (unreachable) panic when the monitor update contained
+								// within `shutdown_finish` was applied.
+								chan.unset_funding_info(msg.channel_id);
 								return Err(convert_chan_phase_err!(self, e, &mut ChannelPhase::Funded(chan), &msg.channel_id).1);
 							}
 						},
@@ -7744,15 +7779,15 @@ where
 		let payment_paths = self.create_blinded_payment_paths(amount_msats, payment_secret)
 			.map_err(|_| Bolt12SemanticError::MissingPaths)?;
 
-		#[cfg(not(feature = "no-std"))]
+		#[cfg(feature = "std")]
 		let builder = refund.respond_using_derived_keys(
 			payment_paths, payment_hash, expanded_key, entropy
 		)?;
-		#[cfg(feature = "no-std")]
+		#[cfg(not(feature = "std"))]
 		let created_at = Duration::from_secs(
 			self.highest_seen_timestamp.load(Ordering::Acquire) as u64
 		);
-		#[cfg(feature = "no-std")]
+		#[cfg(not(feature = "std"))]
 		let builder = refund.respond_using_derived_keys_no_std(
 			payment_paths, payment_hash, created_at, expanded_key, entropy
 		)?;
@@ -8970,13 +9005,7 @@ where
 			let peer_state = &mut *peer_state_lock;
 			let pending_msg_events = &mut peer_state.pending_msg_events;
 			peer_state.channel_by_id.iter_mut().filter_map(|(_, phase)|
-				if let ChannelPhase::Funded(chan) = phase { Some(chan) } else {
-					// Since unfunded channel maps are cleared upon disconnecting a peer, and they're not persisted
-					// (so won't be recovered after a crash), they shouldn't exist here and we would never need to
-					// worry about closing and removing them.
-					debug_assert!(false);
-					None
-				}
+				if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
 			).for_each(|chan| {
 				let logger = WithChannelContext::from(&self.logger, &chan.context);
 				pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
@@ -9195,17 +9224,17 @@ where
 					},
 				};
 
-				#[cfg(feature = "no-std")]
+				#[cfg(not(feature = "std"))]
 				let created_at = Duration::from_secs(
 					self.highest_seen_timestamp.load(Ordering::Acquire) as u64
 				);
 
 				if invoice_request.keys.is_some() {
-					#[cfg(not(feature = "no-std"))]
+					#[cfg(feature = "std")]
 					let builder = invoice_request.respond_using_derived_keys(
 						payment_paths, payment_hash
 					);
-					#[cfg(feature = "no-std")]
+					#[cfg(not(feature = "std"))]
 					let builder = invoice_request.respond_using_derived_keys_no_std(
 						payment_paths, payment_hash, created_at
 					);
@@ -9214,9 +9243,9 @@ where
 						Err(error) => Some(OffersMessage::InvoiceError(error.into())),
 					}
 				} else {
-					#[cfg(not(feature = "no-std"))]
+					#[cfg(feature = "std")]
 					let builder = invoice_request.respond_with(payment_paths, payment_hash);
-					#[cfg(feature = "no-std")]
+					#[cfg(not(feature = "std"))]
 					let builder = invoice_request.respond_with_no_std(
 						payment_paths, payment_hash, created_at
 					);
@@ -10299,6 +10328,7 @@ where
 						reason: ClosureReason::OutdatedChannelManager,
 						counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
 						channel_capacity_sats: Some(channel.context.get_value_satoshis()),
+						channel_funding_txo: channel.context.get_funding_txo(),
 					}, None));
 					for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() {
 						let mut found_htlc = false;
@@ -10352,6 +10382,7 @@ where
 					reason: ClosureReason::DisconnectedPeer,
 					counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
 					channel_capacity_sats: Some(channel.context.get_value_satoshis()),
+					channel_funding_txo: channel.context.get_funding_txo(),
 				}, None));
 			} else {
 				log_error!(logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", &channel.context.channel_id());
@@ -12091,8 +12122,8 @@ mod tests {
 		let sender_intended_amt_msat = 100;
 		let extra_fee_msat = 10;
 		let hop_data = msgs::InboundOnionPayload::Receive {
-			amt_msat: 100,
-			outgoing_cltv_value: 42,
+			sender_intended_htlc_amt_msat: 100,
+			cltv_expiry_height: 42,
 			payment_metadata: None, keysend_preimage: None,
 			payment_data: Some(msgs::FinalOnionHopData {
@@ -12103,7 +12134,7 @@ mod tests {
 		// Check that if the amount we received + the penultimate hop extra fee is less than the sender
 		// intended amount, we fail the payment.
 		let current_height: u32 = node[0].node.best_block.read().unwrap().height();
-		if let Err(crate::ln::channelmanager::InboundOnionErr { err_code, .. }) =
+		if let Err(crate::ln::channelmanager::InboundHTLCErr { err_code, .. }) =
 			create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
 				sender_intended_amt_msat - extra_fee_msat - 1, 42, None, true, Some(extra_fee_msat),
 				current_height, node[0].node.default_configuration.accept_mpp_keysend)
@@ -12113,8 +12144,8 @@ mod tests {
 
 		// If amt_received + extra_fee is equal to the sender intended amount, we're fine.
 		let hop_data = msgs::InboundOnionPayload::Receive { // This is the same payload as above, InboundOnionPayload doesn't implement Clone
-			amt_msat: 100,
-			outgoing_cltv_value: 42,
+			sender_intended_htlc_amt_msat: 100,
+			cltv_expiry_height: 42,
 			payment_metadata: None, keysend_preimage: None,
 			payment_data: Some(msgs::FinalOnionHopData {
@@ -12137,8 +12168,8 @@ mod tests {
 		let current_height: u32 = node[0].node.best_block.read().unwrap().height();
 		let result = create_recv_pending_htlc_info(msgs::InboundOnionPayload::Receive {
-			amt_msat: 100,
-			outgoing_cltv_value: 22,
+			sender_intended_htlc_amt_msat: 100,
+			cltv_expiry_height: 22,
 			payment_metadata: None, keysend_preimage: None,
 			payment_data: Some(msgs::FinalOnionHopData {
@@ -12496,7 +12527,7 @@ pub mod bench {
 		let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
 		let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
 		let scorer = RwLock::new(test_utils::TestScorer::new());
-		let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &scorer);
+		let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &logger_a, &scorer);
 		let mut config: UserConfig = Default::default();
 		config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253);