#[cfg(any(feature = "_test_utils", test))]
use crate::ln::features::Bolt11InvoiceFeatures;
use crate::routing::router::{BlindedTail, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router};
- use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htlc_info, create_fwd_pending_htlc_info, decode_incoming_update_add_htlc_onion, InboundOnionErr, NextPacketDetails};
+ use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htlc_info, create_fwd_pending_htlc_info, decode_incoming_update_add_htlc_onion, InboundHTLCErr, NextPacketDetails};
use crate::ln::msgs;
use crate::ln::onion_utils;
use crate::ln::onion_utils::{HTLCFailReason, INVALID_ONION_BLINDING};
use crate::offers::offer::{DerivedMetadata, Offer, OfferBuilder};
use crate::offers::parse::Bolt12SemanticError;
use crate::offers::refund::{Refund, RefundBuilder};
-use crate::onion_message::{Destination, MessageRouter, OffersMessage, OffersMessageHandler, PendingOnionMessage, new_pending_onion_message};
+use crate::onion_message::messenger::{Destination, MessageRouter, PendingOnionMessage, new_pending_onion_message};
+use crate::onion_message::offers::{OffersMessage, OffersMessageHandler};
use crate::sign::{EntropySource, NodeSigner, Recipient, SignerProvider};
use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
struct MsgHandleErrInternal {
err: msgs::LightningError,
- chan_id: Option<(ChannelId, u128)>, // If Some a channel of ours has been closed
+ closes_channel: bool,
shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
- channel_capacity: Option<u64>,
}
impl MsgHandleErrInternal {
#[inline]
},
},
},
- chan_id: None,
+ closes_channel: false,
shutdown_finish: None,
- channel_capacity: None,
}
}
#[inline]
fn from_no_close(err: msgs::LightningError) -> Self {
- Self { err, chan_id: None, shutdown_finish: None, channel_capacity: None }
+ Self { err, closes_channel: false, shutdown_finish: None }
}
#[inline]
- fn from_finish_shutdown(err: String, channel_id: ChannelId, user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>, channel_capacity: u64) -> Self {
+ fn from_finish_shutdown(err: String, channel_id: ChannelId, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
let err_msg = msgs::ErrorMessage { channel_id, data: err.clone() };
let action = if shutdown_res.monitor_update.is_some() {
// We have a closing `ChannelMonitorUpdate`, which means the channel was funded and we
};
Self {
err: LightningError { err, action },
- chan_id: Some((channel_id, user_channel_id)),
+ closes_channel: true,
shutdown_finish: Some((shutdown_res, channel_update)),
- channel_capacity: Some(channel_capacity)
}
}
#[inline]
},
},
},
- chan_id: None,
+ closes_channel: false,
shutdown_finish: None,
- channel_capacity: None,
}
}
fn closes_channel(&self) -> bool {
- self.chan_id.is_some()
+ self.closes_channel
}
}
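// A minimal standalone sketch (hypothetical types, not LDK's real definitions) of the
// refactor in this hunk: per-channel closure details (id, user id, capacity, reason) now
// travel inside the shutdown result, so the error wrapper only records whether a channel
// closes at all instead of duplicating those fields.
struct ShutdownResultSketch {
	channel_id: [u8; 32],
	user_channel_id: u128,
	channel_capacity_satoshis: u64,
	closure_reason: String, // stand-in for ClosureReason
}
struct ErrSketch {
	err: String,
	closes_channel: bool,
	shutdown_finish: Option<ShutdownResultSketch>,
}
impl ErrSketch {
	fn from_no_close(err: String) -> Self {
		Self { err, closes_channel: false, shutdown_finish: None }
	}
	fn from_finish_shutdown(err: String, shutdown_res: ShutdownResultSketch) -> Self {
		// No need to duplicate channel details here; they ride along in shutdown_res.
		Self { err, closes_channel: true, shutdown_finish: Some(shutdown_res) }
	}
}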
match $internal {
Ok(msg) => Ok(msg),
- Err(MsgHandleErrInternal { err, chan_id, shutdown_finish, channel_capacity }) => {
+ Err(MsgHandleErrInternal { err, shutdown_finish, .. }) => {
let mut msg_events = Vec::with_capacity(2);
if let Some((shutdown_res, update_option)) = shutdown_finish {
+ let counterparty_node_id = shutdown_res.counterparty_node_id;
+ let channel_id = shutdown_res.channel_id;
+ let logger = WithContext::from(
+ &$self.logger, Some(counterparty_node_id), Some(channel_id),
+ );
+ log_error!(logger, "Force-closing channel: {}", err.err);
+
$self.finish_close_channel(shutdown_res);
if let Some(update) = update_option {
msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
- if let Some((channel_id, user_channel_id)) = chan_id {
- $self.pending_events.lock().unwrap().push_back((events::Event::ChannelClosed {
- channel_id, user_channel_id,
- reason: ClosureReason::ProcessingError { err: err.err.clone() },
- counterparty_node_id: Some($counterparty_node_id),
- channel_capacity_sats: channel_capacity,
- }, None));
- }
+ } else {
+ log_error!($self.logger, "Got non-closing error: {}", err.err);
}
- let logger = WithContext::from(
- &$self.logger, Some($counterparty_node_id), chan_id.map(|(chan_id, _)| chan_id)
- );
- log_error!(logger, "{}", err.err);
if let msgs::ErrorAction::IgnoreError = err.action {
} else {
msg_events.push(events::MessageSendEvent::HandleError {
let logger = WithChannelContext::from(&$self.logger, &$channel.context);
log_error!(logger, "Closing channel {} due to close-required error: {}", $channel_id, msg);
update_maps_on_chan_removal!($self, $channel.context);
- let shutdown_res = $channel.context.force_shutdown(true);
- let user_id = $channel.context.get_user_id();
- let channel_capacity_satoshis = $channel.context.get_value_satoshis();
-
- (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, user_id,
- shutdown_res, $channel_update, channel_capacity_satoshis))
+ let reason = ClosureReason::ProcessingError { err: msg.clone() };
+ let shutdown_res = $channel.context.force_shutdown(true, reason);
+ let err =
+ MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $channel_update);
+ (true, err)
},
}
};
.collect()
}
- /// Helper function that issues the channel close events
- fn issue_channel_close_events(&self, context: &ChannelContext<SP>, closure_reason: ClosureReason) {
- let mut pending_events_lock = self.pending_events.lock().unwrap();
- match context.unbroadcasted_funding() {
- Some(transaction) => {
- pending_events_lock.push_back((events::Event::DiscardFunding {
- channel_id: context.channel_id(), transaction
- }, None));
- },
- None => {},
- }
- pending_events_lock.push_back((events::Event::ChannelClosed {
- channel_id: context.channel_id(),
- user_channel_id: context.get_user_id(),
- reason: closure_reason,
- counterparty_node_id: Some(context.get_counterparty_node_id()),
- channel_capacity_sats: Some(context.get_value_satoshis()),
- }, None));
- }
-
fn close_channel_internal(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, override_shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
peer_state_lock, peer_state, per_peer_state, chan);
}
} else {
- self.issue_channel_close_events(chan_phase_entry.get().context(), ClosureReason::HolderForceClosed);
let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
- shutdown_result = Some(chan_phase.context_mut().force_shutdown(false));
+ shutdown_result = Some(chan_phase.context_mut().force_shutdown(false, ClosureReason::HolderForceClosed));
}
},
hash_map::Entry::Vacant(_) => {
let logger = WithContext::from(
&self.logger, Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id),
);
- log_debug!(logger, "Finishing closure of channel with {} HTLCs to fail", shutdown_res.dropped_outbound_htlcs.len());
+
+ log_debug!(logger, "Finishing closure of channel due to {} with {} HTLCs to fail",
+ shutdown_res.closure_reason, shutdown_res.dropped_outbound_htlcs.len());
for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) {
let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
let mut peer_state = peer_state_mutex.lock().unwrap();
if let Some(mut chan) = peer_state.channel_by_id.remove(&channel_id) {
update_maps_on_chan_removal!(self, &chan.context());
- self.issue_channel_close_events(&chan.context(), ClosureReason::FundingBatchClosure);
- shutdown_results.push(chan.context_mut().force_shutdown(false));
+ shutdown_results.push(chan.context_mut().force_shutdown(false, ClosureReason::FundingBatchClosure));
}
}
has_uncompleted_channel = Some(has_uncompleted_channel.map_or(!state, |v| v || !state));
"Closing a batch where all channels have completed initial monitor update",
);
}
+
+ {
+ let mut pending_events = self.pending_events.lock().unwrap();
+ pending_events.push_back((events::Event::ChannelClosed {
+ channel_id: shutdown_res.channel_id,
+ user_channel_id: shutdown_res.user_channel_id,
+ reason: shutdown_res.closure_reason,
+ counterparty_node_id: Some(shutdown_res.counterparty_node_id),
+ channel_capacity_sats: Some(shutdown_res.channel_capacity_satoshis),
+ }, None));
+
+ if let Some(transaction) = shutdown_res.unbroadcasted_funding_tx {
+ pending_events.push_back((events::Event::DiscardFunding {
+ channel_id: shutdown_res.channel_id, transaction
+ }, None));
+ }
+ }
for shutdown_result in shutdown_results.drain(..) {
self.finish_close_channel(shutdown_result);
}
let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id));
if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) {
log_error!(logger, "Force-closing channel {}", channel_id);
- self.issue_channel_close_events(&chan_phase_entry.get().context(), closure_reason);
let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
mem::drop(peer_state);
mem::drop(per_peer_state);
match chan_phase {
ChannelPhase::Funded(mut chan) => {
- self.finish_close_channel(chan.context.force_shutdown(broadcast));
+ self.finish_close_channel(chan.context.force_shutdown(broadcast, closure_reason));
(self.get_channel_update_for_broadcast(&chan).ok(), chan.context.get_counterparty_node_id())
},
ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => {
- self.finish_close_channel(chan_phase.context_mut().force_shutdown(false));
+ self.finish_close_channel(chan_phase.context_mut().force_shutdown(false, closure_reason));
// Unfunded channel has no update
(None, chan_phase.context().get_counterparty_node_id())
},
// delay) once they've sent us a commitment_signed!
PendingHTLCStatus::Forward(info)
},
- Err(InboundOnionErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
+ Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
}
},
onion_utils::Hop::Forward { next_hop_data, next_hop_hmac, new_packet_bytes } => {
match create_fwd_pending_htlc_info(msg, next_hop_data, next_hop_hmac,
new_packet_bytes, shared_secret, next_packet_pubkey_opt) {
Ok(info) => PendingHTLCStatus::Forward(info),
- Err(InboundOnionErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
+ Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
}
}
}
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let funding_txo;
- let (chan, msg_opt) = match peer_state.channel_by_id.remove(temporary_channel_id) {
+ let (mut chan, msg_opt) = match peer_state.channel_by_id.remove(temporary_channel_id) {
Some(ChannelPhase::UnfundedOutboundV1(mut chan)) => {
funding_txo = find_funding_output(&chan, &funding_transaction)?;
let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &&logger)
.map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e {
let channel_id = chan.context.channel_id();
- let user_id = chan.context.get_user_id();
- let shutdown_res = chan.context.force_shutdown(false);
- let channel_capacity = chan.context.get_value_satoshis();
- (chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, user_id, shutdown_res, None, channel_capacity))
+ let reason = ClosureReason::ProcessingError { err: msg.clone() };
+ let shutdown_res = chan.context.force_shutdown(false, reason);
+ (chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, None))
} else { unreachable!(); });
match funding_res {
Ok(funding_msg) => (chan, funding_msg),
},
hash_map::Entry::Vacant(e) => {
let mut outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
- if outpoint_to_peer.insert(funding_txo, chan.context.get_counterparty_node_id()).is_some() {
- panic!("outpoint_to_peer map already contained funding outpoint, which shouldn't be possible");
+ match outpoint_to_peer.entry(funding_txo) {
+ hash_map::Entry::Vacant(e) => { e.insert(chan.context.get_counterparty_node_id()); },
+ hash_map::Entry::Occupied(o) => {
+ let err = format!(
+ "An existing channel using outpoint {} is open with peer {}",
+ funding_txo, o.get()
+ );
+ mem::drop(outpoint_to_peer);
+ mem::drop(peer_state_lock);
+ mem::drop(per_peer_state);
+ let reason = ClosureReason::ProcessingError { err: err.clone() };
+ self.finish_close_channel(chan.context.force_shutdown(true, reason));
+ return Err(APIError::ChannelUnavailable { err });
+ }
}
e.insert(ChannelPhase::UnfundedOutboundV1(chan));
}
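// A self-contained sketch (simplified key and value types, not LDK's) of the pattern
// adopted above: probing the outpoint map via the Entry API so a pre-registered funding
// outpoint becomes a recoverable error instead of a panic.
use std::collections::{hash_map, HashMap};

fn register_funding_outpoint(
	map: &mut HashMap<String, String>, outpoint: String, peer: String,
) -> Result<(), String> {
	match map.entry(outpoint) {
		hash_map::Entry::Vacant(e) => { e.insert(peer); Ok(()) },
		hash_map::Entry::Occupied(o) => Err(format!(
			"An existing channel using outpoint {} is open with peer {}", o.key(), o.get()
		)),
	}
}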
.and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id))
.map(|mut chan| {
update_maps_on_chan_removal!(self, &chan.context());
- self.issue_channel_close_events(&chan.context(), ClosureReason::ProcessingError { err: e.clone() });
- shutdown_results.push(chan.context_mut().force_shutdown(false));
+ let closure_reason = ClosureReason::ProcessingError { err: e.clone() };
+ shutdown_results.push(chan.context_mut().force_shutdown(false, closure_reason));
});
}
}
current_height, self.default_configuration.accept_mpp_keysend)
{
Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, vec![(info, prev_htlc_id)])),
- Err(InboundOnionErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
+ Err(InboundHTLCErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
}
},
_ => panic!(),
if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
let logger = WithChannelContext::from(&self.logger, &chan.context);
for forward_info in pending_forwards.drain(..) {
- match forward_info {
+ let queue_fail_htlc_res = match forward_info {
HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
forward_info: PendingHTLCInfo {
));
continue;
}
+ None
},
HTLCForwardInfo::AddHTLC { .. } => {
panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
},
HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
- if let Err(e) = chan.queue_fail_htlc(
- htlc_id, err_packet, &&logger
- ) {
- if let ChannelError::Ignore(msg) = e {
- log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
- } else {
- panic!("Stated return value requirements in queue_fail_htlc() were not met");
- }
- // fail-backs are best-effort, we probably already have one
- // pending, and if not that's OK, if not, the channel is on
- // the chain and sending the HTLC-Timeout is their problem.
- continue;
- }
+ Some((chan.queue_fail_htlc(htlc_id, err_packet, &&logger), htlc_id))
},
HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
- log_trace!(self.logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
- if let Err(e) = chan.queue_fail_malformed_htlc(htlc_id, failure_code, sha256_of_onion, &self.logger) {
- if let ChannelError::Ignore(msg) = e {
- log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
- } else {
- panic!("Stated return value requirements in queue_fail_malformed_htlc() were not met");
- }
- // fail-backs are best-effort, we probably already have one
- // pending, and if not that's OK, if not, the channel is on
- // the chain and sending the HTLC-Timeout is their problem.
- continue;
- }
+ log_trace!(logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
+ let res = chan.queue_fail_malformed_htlc(
+ htlc_id, failure_code, sha256_of_onion, &&logger
+ );
+ Some((res, htlc_id))
},
+ };
+ if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res {
+ if let Err(e) = queue_fail_htlc_res {
+ if let ChannelError::Ignore(msg) = e {
+ log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
+ } else {
+ panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met");
+ }
+ // fail-backs are best-effort, we probably already have one
+ // pending, and if not that's OK: the channel is on the chain
+ // and sending the HTLC-Timeout is their problem.
+ continue;
+ }
}
}
} else {
log_error!(logger,
"Force-closing pending channel with ID {} for not establishing in a timely manner", chan_id);
update_maps_on_chan_removal!(self, &context);
- self.issue_channel_close_events(&context, ClosureReason::HolderForceClosed);
- shutdown_channels.push(context.force_shutdown(false));
+ shutdown_channels.push(context.force_shutdown(false, ClosureReason::HolderForceClosed));
pending_msg_events.push(MessageSendEvent::HandleError {
node_id: counterparty_node_id,
action: msgs::ErrorAction::SendErrorMessage {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
- let err_str = format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id);
+ let err_str = format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id);
log_error!(logger, "{}", err_str);
- APIError::ChannelUnavailable { err: err_str }
+ APIError::ChannelUnavailable { err: err_str }
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
APIError::ChannelUnavailable { err: err_str }
})
}
- _ => {
+ _ => {
let err_str = "No such channel awaiting to be accepted.".to_owned();
log_error!(logger, "{}", err_str);
let res =
chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger);
match res {
- Ok((chan, monitor)) => {
+ Ok((mut chan, monitor)) => {
if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
// We really should be able to insert here without doing a second
// lookup, but sadly rust stdlib doesn't currently allow keeping
Ok(())
} else {
let e = ChannelError::Close("Channel funding outpoint was a duplicate".to_owned());
+ // We weren't able to watch the channel to begin with, so no
+ // updates should be made on it. Previously, full_stack_target
+ // found an (unreachable) panic when the monitor update contained
+ // within `shutdown_finish` was applied.
+ chan.unset_funding_info(msg.channel_id);
return Err(convert_chan_phase_err!(self, e, &mut ChannelPhase::Funded(chan), &msg.channel_id).1);
}
},
let context = phase.context_mut();
let logger = WithChannelContext::from(&self.logger, context);
log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
- self.issue_channel_close_events(&context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
let mut chan = remove_channel_phase!(self, chan_phase_entry);
- finish_shutdown = Some(chan.context_mut().force_shutdown(false));
+ finish_shutdown = Some(chan.context_mut().force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel));
},
}
} else {
msg: update
});
}
- self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
}
mem::drop(per_peer_state);
if let Some(shutdown_result) = shutdown_result {
let pending_msg_events = &mut peer_state.pending_msg_events;
if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) {
if let ChannelPhase::Funded(mut chan) = remove_channel_phase!(self, chan_phase_entry) {
- failed_channels.push(chan.context.force_shutdown(false));
+ failed_channels.push(chan.context.force_shutdown(false, ClosureReason::HolderForceClosed));
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
- self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: chan.context.get_counterparty_node_id(),
action: msgs::ErrorAction::DisconnectPeer {
});
}
- self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
-
log_info!(logger, "Broadcasting {}", log_tx!(tx));
self.tx_broadcaster.broadcast_transactions(&[&tx]);
update_maps_on_chan_removal!(self, &chan.context);
update_maps_on_chan_removal!(self, &channel.context);
// It looks like our counterparty went on-chain or the funding transaction was
// reorged out of the main chain. Close the channel.
- failed_channels.push(channel.context.force_shutdown(true));
+ let reason_message = format!("{}", reason);
+ failed_channels.push(channel.context.force_shutdown(true, reason));
if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
- let reason_message = format!("{}", reason);
- self.issue_channel_close_events(&channel.context, reason);
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: channel.context.get_counterparty_node_id(),
action: msgs::ErrorAction::DisconnectPeer {
};
// Clean up for removal.
update_maps_on_chan_removal!(self, &context);
- self.issue_channel_close_events(&context, ClosureReason::DisconnectedPeer);
- failed_channels.push(context.force_shutdown(false));
+ failed_channels.push(context.force_shutdown(false, ClosureReason::DisconnectedPeer));
false
});
// Note that we don't bother generating any events for pre-accept channels -
log_error!(logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
&channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number());
}
- let mut shutdown_result = channel.context.force_shutdown(true);
+ let mut shutdown_result = channel.context.force_shutdown(true, ClosureReason::OutdatedChannelManager);
if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
return Err(DecodeError::InvalidValue);
}
// If we were persisted and shut down while the initial ChannelMonitor persistence
// was in-progress, we never broadcasted the funding transaction and can still
// safely discard the channel.
- let _ = channel.context.force_shutdown(false);
+ let _ = channel.context.force_shutdown(false, ClosureReason::DisconnectedPeer);
channel_closures.push_back((events::Event::ChannelClosed {
channel_id: channel.context.channel_id(),
user_channel_id: channel.context.get_user_id(),
let sender_intended_amt_msat = 100;
let extra_fee_msat = 10;
let hop_data = msgs::InboundOnionPayload::Receive {
- amt_msat: 100,
- outgoing_cltv_value: 42,
+ sender_intended_htlc_amt_msat: 100,
+ cltv_expiry_height: 42,
payment_metadata: None,
keysend_preimage: None,
payment_data: Some(msgs::FinalOnionHopData {
// Check that if the amount we received + the penultimate hop extra fee is less than the sender
// intended amount, we fail the payment.
let current_height: u32 = node[0].node.best_block.read().unwrap().height();
- if let Err(crate::ln::channelmanager::InboundOnionErr { err_code, .. }) =
+ if let Err(crate::ln::channelmanager::InboundHTLCErr { err_code, .. }) =
create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
sender_intended_amt_msat - extra_fee_msat - 1, 42, None, true, Some(extra_fee_msat),
current_height, node[0].node.default_configuration.accept_mpp_keysend)
// If amt_received + extra_fee is equal to the sender intended amount, we're fine.
let hop_data = msgs::InboundOnionPayload::Receive { // This is the same payload as above, InboundOnionPayload doesn't implement Clone
- amt_msat: 100,
- outgoing_cltv_value: 42,
+ sender_intended_htlc_amt_msat: 100,
+ cltv_expiry_height: 42,
payment_metadata: None,
keysend_preimage: None,
payment_data: Some(msgs::FinalOnionHopData {
let current_height: u32 = node[0].node.best_block.read().unwrap().height();
let result = create_recv_pending_htlc_info(msgs::InboundOnionPayload::Receive {
- amt_msat: 100,
- outgoing_cltv_value: 22,
+ sender_intended_htlc_amt_msat: 100,
+ cltv_expiry_height: 22,
payment_metadata: None,
keysend_preimage: None,
payment_data: Some(msgs::FinalOnionHopData {
//check to see if the funder, who sent the update_fee request, can afford the new fee (funder_balance >= fee+channel_reserve)
//Should produce an error.
nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg);
- nodes[1].logger.assert_log("lightning::ln::channelmanager", "Funding remote cannot afford proposed new fee".to_string(), 1);
+ nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Funding remote cannot afford proposed new fee", 3);
check_added_monitors!(nodes[1], 1);
check_closed_broadcast!(nodes[1], true);
check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") },
nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &msg);
// Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
- nodes[0].logger.assert_log("lightning::ln::channelmanager", "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string(), 1);
+ nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value", 3);
assert_eq!(nodes[0].node.list_channels().len(), 0);
let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value");
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
// Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
- nodes[1].logger.assert_log("lightning::ln::channelmanager", "Remote HTLC add would put them under remote reserve value".to_string(), 1);
+ nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote HTLC add would put them under remote reserve value", 3);
assert_eq!(nodes[1].node.list_channels().len(), 1);
let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
let events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events.len(), if deliver_bs_raa { 3 + nodes.len() - 1 } else { 4 + nodes.len() });
- match events[0] {
- Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => { },
- _ => panic!("Unexepected event"),
- }
- match events[1] {
- Event::PaymentPathFailed { ref payment_hash, .. } => {
- assert_eq!(*payment_hash, fourth_payment_hash);
- },
- _ => panic!("Unexpected event"),
- }
- match events[2] {
- Event::PaymentFailed { ref payment_hash, .. } => {
- assert_eq!(*payment_hash, fourth_payment_hash);
- },
- _ => panic!("Unexpected event"),
- }
+ assert!(events.iter().any(|ev| matches!(
+ ev,
+ Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. }
+ )));
+ assert!(events.iter().any(|ev| matches!(
+ ev,
+ Event::PaymentPathFailed { ref payment_hash, .. } if *payment_hash == fourth_payment_hash
+ )));
+ assert!(events.iter().any(|ev| matches!(
+ ev,
+ Event::PaymentFailed { ref payment_hash, .. } if *payment_hash == fourth_payment_hash
+ )));
nodes[1].node.process_pending_htlc_forwards();
check_added_monitors!(nodes[1], 1);
updates.update_add_htlcs[0].amount_msat = 0;
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
- nodes[1].logger.assert_log("lightning::ln::channelmanager", "Remote side tried to send a 0-msat HTLC".to_string(), 1);
+ nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote side tried to send a 0-msat HTLC", 3);
check_closed_broadcast!(nodes[1], true).unwrap();
check_added_monitors!(nodes[1], 1);
check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() },
RecipientOnionFields::secret_only(our_payment_secret), height + 1, &None).unwrap();
// Edit amt_to_forward to simulate the sender having set
// the final amount and the routing node taking less fee
- if let msgs::OutboundOnionPayload::Receive { ref mut amt_msat, .. } = onion_payloads[1] {
- *amt_msat = 99_000;
+ if let msgs::OutboundOnionPayload::Receive {
+ ref mut sender_intended_htlc_amt_msat, ..
+ } = onion_payloads[1] {
+ *sender_intended_htlc_amt_msat = 99_000;
} else { panic!() }
let new_onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap();
payment_event.msgs[0].onion_routing_packet = new_onion_packet;
}
}
+#[test]
+fn test_peer_funding_sidechannel() {
+ // Test that if a peer somehow learns which txid we'll use for our channel funding before we
+ // receive `funding_transaction_generated` the peer cannot cause us to crash. We'd previously
+ // assumed that LDK would receive `funding_transaction_generated` prior to our peer learning
+ // the txid and panicked if the peer tried to open a redundant channel to us with the same
+ // funding outpoint.
+ //
+ // While this assumption is generally safe, some users may have out-of-band protocols where
+ // they notify their LSP about a funding outpoint first, or this may be violated in the future
+ // with collaborative transaction construction protocols, i.e. dual-funding.
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+ let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ let temp_chan_id_ab = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
+ let temp_chan_id_ca = exchange_open_accept_chan(&nodes[2], &nodes[0], 1_000_000, 0);
+
+ let (_, tx, funding_output) =
+ create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
+
+ let cs_funding_events = nodes[2].node.get_and_clear_pending_events();
+ assert_eq!(cs_funding_events.len(), 1);
+ match cs_funding_events[0] {
+ Event::FundingGenerationReady { .. } => {}
+ _ => panic!("Unexpected event {:?}", cs_funding_events),
+ }
+
+ nodes[2].node.funding_transaction_generated_unchecked(&temp_chan_id_ca, &nodes[0].node.get_our_node_id(), tx.clone(), funding_output.index).unwrap();
+ let funding_created_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingCreated, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_funding_created(&nodes[2].node.get_our_node_id(), &funding_created_msg);
+ get_event_msg!(nodes[0], MessageSendEvent::SendFundingSigned, nodes[2].node.get_our_node_id());
+ expect_channel_pending_event(&nodes[0], &nodes[2].node.get_our_node_id());
+ check_added_monitors!(nodes[0], 1);
+
+ let res = nodes[0].node.funding_transaction_generated(&temp_chan_id_ab, &nodes[1].node.get_our_node_id(), tx.clone());
+ let err_msg = format!("{:?}", res.unwrap_err());
+ assert!(err_msg.contains("An existing channel using outpoint "));
+ assert!(err_msg.contains(" is open with peer"));
+ // Even though the last funding_transaction_generated errored, it still generated a
+ // SendFundingCreated. However, when the peer responds with a funding_signed it will send the
+ // appropriate error message.
+ let as_funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &as_funding_created);
+ check_added_monitors!(nodes[1], 1);
+ expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
+ let reason = ClosureReason::ProcessingError { err: format!("An existing channel using outpoint {} is open with peer {}", funding_output, nodes[2].node.get_our_node_id()), };
+ check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(funding_output.to_channel_id(), true, reason)]);
+
+ let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
+ get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id());
+}
+
+#[test]
+fn test_duplicate_conflicting_funding_from_second_peer() {
+ // Test that if a user tries to fund a channel with a funding outpoint they'd previously used
+ // we don't try to remove the previous ChannelMonitor. This is largely a test to ensure we
+ // don't regress in the fuzzer, as such funding getting past our outpoint-matches checks
+ // implies the user (and our counterparty) has reused cryptographic keys across channels, which
+ // we require the user not to do.
+ let chanmon_cfgs = create_chanmon_cfgs(4);
+ let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+ let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+ let temp_chan_id = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
+
+ let (_, tx, funding_output) =
+ create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
+
+ // Now that we have a funding outpoint, create a dummy `ChannelMonitor` and insert it into
+ // nodes[0]'s ChainMonitor so that the initial `ChannelMonitor` write fails.
+ let dummy_chan_id = create_chan_between_nodes(&nodes[2], &nodes[3]).3;
+ let dummy_monitor = get_monitor!(nodes[2], dummy_chan_id).clone();
+ nodes[0].chain_monitor.chain_monitor.watch_channel(funding_output, dummy_monitor).unwrap();
+
+ nodes[0].node.funding_transaction_generated(&temp_chan_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
+
+ let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
+ let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+ check_added_monitors!(nodes[1], 1);
+ expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
+
+ nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
+ // At this point, the channel should be closed, after having generated one monitor write (the
+ // watch_channel call which failed), but zero monitor updates.
+ check_added_monitors!(nodes[0], 1);
+ get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id());
+ let err_reason = ClosureReason::ProcessingError { err: "Channel funding outpoint was a duplicate".to_owned() };
+ check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(funding_signed_msg.channel_id, true, err_reason)]);
+}
+
#[test]
fn test_duplicate_funding_err_in_funding() {
// Test that if we have a live channel with one peer, then another peer comes along and tries
chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap()
},
_ => panic!("Unexpected ChannelPhase variant"),
- }
+ }.unwrap()
};
check_added_monitors!(nodes[0], 0);
- nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created.unwrap());
+ nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
// At this point we'll look up if the channel_id is present and immediately fail the channel
// without trying to persist the `ChannelMonitor`.
check_added_monitors!(nodes[1], 0);
check_closed_events(&nodes[1], &[
- ExpectedCloseEvent::from_id_reason(channel_id, false, ClosureReason::ProcessingError {
+ ExpectedCloseEvent::from_id_reason(funding_created.temporary_channel_id, false, ClosureReason::ProcessingError {
err: "Already had channel with the new channel_id".to_owned()
})
]);
/// Used in decrypting the onion packet's payload.
pub blinding_point: PublicKey,
/// The full onion packet including hop data, pubkey, and hmac
- pub onion_routing_packet: onion_message::Packet,
+ pub onion_routing_packet: onion_message::packet::Packet,
}
/// An [`update_fulfill_htlc`] message to be sent to or received from a peer.
payment_metadata: Option<Vec<u8>>,
keysend_preimage: Option<PaymentPreimage>,
custom_tlvs: Vec<(u64, Vec<u8>)>,
- amt_msat: u64,
- outgoing_cltv_value: u32,
+ sender_intended_htlc_amt_msat: u64,
+ cltv_expiry_height: u32,
},
BlindedForward {
short_channel_id: u64,
intro_node_blinding_point: PublicKey,
},
BlindedReceive {
- amt_msat: u64,
+ sender_intended_htlc_amt_msat: u64,
total_msat: u64,
- outgoing_cltv_value: u32,
+ cltv_expiry_height: u32,
payment_secret: PaymentSecret,
payment_constraints: PaymentConstraints,
intro_node_blinding_point: Option<PublicKey>,
payment_metadata: Option<Vec<u8>>,
keysend_preimage: Option<PaymentPreimage>,
custom_tlvs: Vec<(u64, Vec<u8>)>,
- amt_msat: u64,
- outgoing_cltv_value: u32,
+ sender_intended_htlc_amt_msat: u64,
+ cltv_expiry_height: u32,
},
BlindedForward {
encrypted_tlvs: Vec<u8>,
intro_node_blinding_point: Option<PublicKey>,
},
BlindedReceive {
- amt_msat: u64,
+ sender_intended_htlc_amt_msat: u64,
total_msat: u64,
- outgoing_cltv_value: u32,
+ cltv_expiry_height: u32,
encrypted_tlvs: Vec<u8>,
intro_node_blinding_point: Option<PublicKey>, // Set if the introduction node of the blinded path is the final node
}
let blinding_point: PublicKey = Readable::read(r)?;
let len: u16 = Readable::read(r)?;
let mut packet_reader = FixedLengthReader::new(r, len as u64);
- let onion_routing_packet: onion_message::Packet = <onion_message::Packet as LengthReadable>::read(&mut packet_reader)?;
+ let onion_routing_packet: onion_message::packet::Packet =
+ <onion_message::packet::Packet as LengthReadable>::read(&mut packet_reader)?;
Ok(Self {
blinding_point,
onion_routing_packet,
});
},
Self::Receive {
- ref payment_data, ref payment_metadata, ref keysend_preimage, amt_msat,
- outgoing_cltv_value, ref custom_tlvs,
+ ref payment_data, ref payment_metadata, ref keysend_preimage, sender_intended_htlc_amt_msat,
+ cltv_expiry_height, ref custom_tlvs,
} => {
// We need to update [`ln::outbound_payment::RecipientOnionFields::with_custom_tlvs`]
// to reject any reserved types in the experimental range if new ones are ever
let mut custom_tlvs: Vec<&(u64, Vec<u8>)> = custom_tlvs.iter().chain(keysend_tlv.iter()).collect();
custom_tlvs.sort_unstable_by_key(|(typ, _)| *typ);
_encode_varint_length_prefixed_tlv!(w, {
- (2, HighZeroBytesDroppedBigSize(*amt_msat), required),
- (4, HighZeroBytesDroppedBigSize(*outgoing_cltv_value), required),
+ (2, HighZeroBytesDroppedBigSize(*sender_intended_htlc_amt_msat), required),
+ (4, HighZeroBytesDroppedBigSize(*cltv_expiry_height), required),
(8, payment_data, option),
(16, payment_metadata.as_ref().map(|m| WithoutLength(m)), option)
}, custom_tlvs.iter());
});
},
Self::BlindedReceive {
- amt_msat, total_msat, outgoing_cltv_value, encrypted_tlvs,
+ sender_intended_htlc_amt_msat, total_msat, cltv_expiry_height, encrypted_tlvs,
intro_node_blinding_point,
} => {
_encode_varint_length_prefixed_tlv!(w, {
- (2, HighZeroBytesDroppedBigSize(*amt_msat), required),
- (4, HighZeroBytesDroppedBigSize(*outgoing_cltv_value), required),
+ (2, HighZeroBytesDroppedBigSize(*sender_intended_htlc_amt_msat), required),
+ (4, HighZeroBytesDroppedBigSize(*cltv_expiry_height), required),
(10, *encrypted_tlvs, required_vec),
(12, intro_node_blinding_point, option),
(18, HighZeroBytesDroppedBigSize(*total_msat), required)
})} => {
if total_msat.unwrap_or(0) > MAX_VALUE_MSAT { return Err(DecodeError::InvalidValue) }
Ok(Self::BlindedReceive {
- amt_msat: amt.ok_or(DecodeError::InvalidValue)?,
+ sender_intended_htlc_amt_msat: amt.ok_or(DecodeError::InvalidValue)?,
total_msat: total_msat.ok_or(DecodeError::InvalidValue)?,
- outgoing_cltv_value: cltv_value.ok_or(DecodeError::InvalidValue)?,
+ cltv_expiry_height: cltv_value.ok_or(DecodeError::InvalidValue)?,
payment_secret,
payment_constraints,
intro_node_blinding_point,
payment_data,
payment_metadata: payment_metadata.map(|w| w.0),
keysend_preimage,
- amt_msat: amt.ok_or(DecodeError::InvalidValue)?,
- outgoing_cltv_value: cltv_value.ok_or(DecodeError::InvalidValue)?,
+ sender_intended_htlc_amt_msat: amt.ok_or(DecodeError::InvalidValue)?,
+ cltv_expiry_height: cltv_value.ok_or(DecodeError::InvalidValue)?,
custom_tlvs,
})
}
payment_data: None,
payment_metadata: None,
keysend_preimage: None,
- amt_msat: 0x0badf00d01020304,
- outgoing_cltv_value: 0xffffffff,
+ sender_intended_htlc_amt_msat: 0x0badf00d01020304,
+ cltv_expiry_height: 0xffffffff,
custom_tlvs: vec![],
};
let encoded_value = outbound_msg.encode();
let node_signer = test_utils::TestKeysInterface::new(&[42; 32], Network::Testnet);
let inbound_msg = ReadableArgs::read(&mut Cursor::new(&target_value[..]), (None, &&node_signer)).unwrap();
if let msgs::InboundOnionPayload::Receive {
- payment_data: None, amt_msat, outgoing_cltv_value, ..
+ payment_data: None, sender_intended_htlc_amt_msat, cltv_expiry_height, ..
} = inbound_msg {
- assert_eq!(amt_msat, 0x0badf00d01020304);
- assert_eq!(outgoing_cltv_value, 0xffffffff);
+ assert_eq!(sender_intended_htlc_amt_msat, 0x0badf00d01020304);
+ assert_eq!(cltv_expiry_height, 0xffffffff);
} else { panic!(); }
}
}),
payment_metadata: None,
keysend_preimage: None,
- amt_msat: 0x0badf00d01020304,
- outgoing_cltv_value: 0xffffffff,
+ sender_intended_htlc_amt_msat: 0x0badf00d01020304,
+ cltv_expiry_height: 0xffffffff,
custom_tlvs: vec![],
};
let encoded_value = outbound_msg.encode();
payment_secret,
total_msat: 0x1badca1f
}),
- amt_msat, outgoing_cltv_value,
+ sender_intended_htlc_amt_msat, cltv_expiry_height,
payment_metadata: None,
keysend_preimage: None,
custom_tlvs,
} = inbound_msg {
assert_eq!(payment_secret, expected_payment_secret);
- assert_eq!(amt_msat, 0x0badf00d01020304);
- assert_eq!(outgoing_cltv_value, 0xffffffff);
+ assert_eq!(sender_intended_htlc_amt_msat, 0x0badf00d01020304);
+ assert_eq!(cltv_expiry_height, 0xffffffff);
assert_eq!(custom_tlvs, vec![]);
} else { panic!(); }
}
payment_metadata: None,
keysend_preimage: None,
custom_tlvs: bad_type_range_tlvs,
- amt_msat: 0x0badf00d01020304,
- outgoing_cltv_value: 0xffffffff,
+ sender_intended_htlc_amt_msat: 0x0badf00d01020304,
+ cltv_expiry_height: 0xffffffff,
};
let encoded_value = msg.encode();
let node_signer = test_utils::TestKeysInterface::new(&[42; 32], Network::Testnet);
payment_metadata: None,
keysend_preimage: None,
custom_tlvs: expected_custom_tlvs.clone(),
- amt_msat: 0x0badf00d01020304,
- outgoing_cltv_value: 0xffffffff,
+ sender_intended_htlc_amt_msat: 0x0badf00d01020304,
+ cltv_expiry_height: 0xffffffff,
};
let encoded_value = msg.encode();
let target_value = <Vec<u8>>::from_hex("2e02080badf00d010203040404ffffffffff0000000146c6616b021234ff0000000146c6616f084242424242424242").unwrap();
payment_metadata: None,
keysend_preimage: None,
custom_tlvs,
- amt_msat,
- outgoing_cltv_value,
+ sender_intended_htlc_amt_msat,
+ cltv_expiry_height: outgoing_cltv_value,
..
} = inbound_msg {
assert_eq!(custom_tlvs, expected_custom_tlvs);
- assert_eq!(amt_msat, 0x0badf00d01020304);
+ assert_eq!(sender_intended_htlc_amt_msat, 0x0badf00d01020304);
assert_eq!(outgoing_cltv_value, 0xffffffff);
} else { panic!(); }
}