htlc_id: u64,
err_packet: msgs::OnionErrorPacket,
},
+ FailMalformedHTLC {
+ htlc_id: u64,
+ failure_code: u16,
+ sha256_of_onion: [u8; 32],
+ },
}
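// Illustrative sketch (editorial, not part of the diff): a blinded-HTLC failure
// queued through the new variant would look roughly like the following, using
// `onion_utils::INVALID_ONION_BLINDING` (BOLT 4's BADONION|PERM|24 code):
//
//     HTLCUpdateAwaitingACK::FailMalformedHTLC {
//         htlc_id,
//         failure_code: onion_utils::INVALID_ONION_BLINDING,
//         sha256_of_onion,
//     }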
macro_rules! define_state_flags {
total_fee_sat: u64, // the total fee included in the transaction
num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *not* included)
htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
- local_balance_msat: u64, // local balance before fees but considering dust limits
- remote_balance_msat: u64, // remote balance before fees but considering dust limits
+ local_balance_msat: u64, // local balance before fees, *not* considering dust limits
+ remote_balance_msat: u64, // remote balance before fees, *not* considering dust limits
outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
}
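// Worked example (editorial): in a 100_000 sat channel where value_to_self is
// 60_000_000 msat and a single outbound HTLC of 5_000_000 msat is pending,
// local_balance_msat = 55_000_000 and remote_balance_msat = 40_000_000. Per the
// comment change above, these stats are now reported without trimming either
// balance to zero when it falls below the broadcaster's dust limit, and before
// subtracting fees.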
/// The result of a shutdown that should be handled.
#[must_use]
pub(crate) struct ShutdownResult {
+ pub(crate) closure_reason: ClosureReason,
/// A channel monitor update to apply.
pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
/// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
/// propagated to the remainder of the batch.
pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
pub(crate) channel_id: ChannelId,
+ pub(crate) user_channel_id: u128,
+ pub(crate) channel_capacity_satoshis: u64,
pub(crate) counterparty_node_id: PublicKey,
+ pub(crate) unbroadcasted_funding_tx: Option<Transaction>,
}
/// If the majority of the channel's funds are to the fundee and the initiator holds only just
}
}
- let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
+ let value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
assert!(value_to_self_msat >= 0);
// Note that in case they have several just-awaiting-last-RAA fulfills in-progress (i.e.
// AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
// "violate" their reserve value by counting those against it. Thus, we have to convert
// everything to i64 before subtracting as otherwise we can overflow.
- let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
+ let value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
assert!(value_to_remote_msat >= 0);
#[cfg(debug_assertions)]
htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
htlcs_included.append(&mut included_dust_htlcs);
- // For the stats, trimmed-to-0 the value in msats accordingly
- value_to_self_msat = if (value_to_self_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat };
- value_to_remote_msat = if (value_to_remote_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat };
-
CommitmentStats {
tx,
feerate_per_kw,
/// will sign and send to our counterparty.
/// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
fn build_remote_transaction_keys(&self) -> TxCreationKeys {
- //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
- //may see payments to it!
let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
let counterparty_pubkeys = self.get_counterparty_pubkeys();
if let Some(feerate) = outbound_feerate_update {
feerate_per_kw = cmp::max(feerate_per_kw, feerate);
}
- cmp::max(2530, feerate_per_kw * 1250 / 1000)
+ let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
+ cmp::max(2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
}
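// Editorial sketch (not part of the diff) of why the checked multiply matters:
// at feerate_per_kw = 4_000_000, the old `feerate_per_kw * 1250 / 1000` first
// computes 5_000_000_000, which exceeds u32::MAX (4_294_967_295) and would
// panic in debug builds or wrap in release builds. The new code saturates the
// +25% bump instead:
#[cfg(test)]
fn example_bumped_feerate(feerate_per_kw: u32) -> u32 {
	let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
	cmp::max(2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
}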
/// Get forwarding information for the counterparty.
res
}
- fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
- where F: Fn() -> Option<O> {
+ fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O> where F: Fn() -> Option<O> {
match self.channel_state {
ChannelState::FundingNegotiated => f(),
- ChannelState::AwaitingChannelReady(flags) => if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) {
- f()
- } else {
- None
- },
+ ChannelState::AwaitingChannelReady(flags) =>
+ if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) ||
+ flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into())
+ {
+ f()
+ } else {
+ None
+ },
_ => None,
}
}
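// Editorial note: the added `MONITOR_UPDATE_IN_PROGRESS` arm reflects that the
// funding transaction is never broadcast before the initial monitor update
// completes, so while that update is still pending the funding can safely be
// treated as unbroadcast.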
/// those explicitly stated to be allowed after shutdown completes, e.g. some simple getters).
/// Also returns the list of payment_hashes of HTLCs which we can safely fail backwards
/// immediately (others we will have to allow to time out).
- pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
+ pub fn force_shutdown(&mut self, should_broadcast: bool, closure_reason: ClosureReason) -> ShutdownResult {
// Note that we MUST only generate a monitor update that indicates force-closure - we're
// called during initialization prior to the chain_monitor in the encompassing ChannelManager
// being fully configured in some cases. Thus, it's likely any monitor events we generate will
self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
update_id: self.latest_monitor_update_id,
+ counterparty_node_id: Some(self.counterparty_node_id),
updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
}))
} else { None }
} else { None };
let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
+ let unbroadcasted_funding_tx = self.unbroadcasted_funding();
self.channel_state = ChannelState::ShutdownComplete;
self.update_time_counter += 1;
ShutdownResult {
+ closure_reason,
monitor_update,
dropped_outbound_htlcs,
unbroadcasted_batch_funding_txid,
channel_id: self.channel_id,
+ user_channel_id: self.user_id,
+ channel_capacity_satoshis: self.channel_value_satoshis,
counterparty_node_id: self.counterparty_node_id,
+ unbroadcasted_funding_tx,
}
}
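// Illustrative call site (editorial, not part of the diff): callers now
// attribute a reason when force-closing, e.g., hypothetically:
//
//     let shutdown_res = self.context.force_shutdown(
//         true, ClosureReason::HolderForceClosed,
//     );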
.ok();
if funding_signed.is_none() {
- log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
- self.signer_pending_funding = true;
+ #[cfg(not(async_signing))] {
+ panic!("Failed to get signature for funding_signed");
+ }
+ #[cfg(async_signing)] {
+ log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
+ self.signer_pending_funding = true;
+ }
} else if self.signer_pending_funding {
log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
self.signer_pending_funding = false;
feerate: u32,
}
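// Editorial note: `async_signing` is a compile-time cfg flag (enabled, as one
// assumed invocation, via RUSTFLAGS="--cfg=async_signing"); without it, a
// signer that cannot produce a signature immediately is treated as a hard
// failure rather than deferred via `signer_pending_funding`.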
+/// Contents of a wire message that fails an HTLC backwards. Useful for [`Channel::fail_htlc`] to
+/// fail with either [`msgs::UpdateFailMalformedHTLC`] or [`msgs::UpdateFailHTLC`] as needed.
+trait FailHTLCContents {
+ type Message: FailHTLCMessageName;
+ fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message;
+ fn to_inbound_htlc_state(self) -> InboundHTLCState;
+ fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK;
+}
+impl FailHTLCContents for msgs::OnionErrorPacket {
+ type Message = msgs::UpdateFailHTLC;
+ fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
+ msgs::UpdateFailHTLC { htlc_id, channel_id, reason: self }
+ }
+ fn to_inbound_htlc_state(self) -> InboundHTLCState {
+ InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(self))
+ }
+ fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
+ HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet: self }
+ }
+}
+impl FailHTLCContents for ([u8; 32], u16) {
+ type Message = msgs::UpdateFailMalformedHTLC;
+ fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
+ msgs::UpdateFailMalformedHTLC {
+ htlc_id,
+ channel_id,
+ sha256_of_onion: self.0,
+ failure_code: self.1
+ }
+ }
+ fn to_inbound_htlc_state(self) -> InboundHTLCState {
+ InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(self))
+ }
+ fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
+ HTLCUpdateAwaitingACK::FailMalformedHTLC {
+ htlc_id,
+ sha256_of_onion: self.0,
+ failure_code: self.1
+ }
+ }
+}
+
+trait FailHTLCMessageName {
+ fn name() -> &'static str;
+}
+impl FailHTLCMessageName for msgs::UpdateFailHTLC {
+ fn name() -> &'static str {
+ "update_fail_htlc"
+ }
+}
+impl FailHTLCMessageName for msgs::UpdateFailMalformedHTLC {
+ fn name() -> &'static str {
+ "update_fail_malformed_htlc"
+ }
+}
+
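// Editorial sketch (not part of the diff): the two impls let the generic
// `fail_htlc` below emit whichever wire message matches its error contents:
#[cfg(test)]
fn example_fail_messages(channel_id: ChannelId) {
	let err = msgs::OnionErrorPacket { data: vec![42] };
	let _fail: msgs::UpdateFailHTLC = err.to_message(0, channel_id);
	// 0x8000 | 0x4000 | 24 is BOLT 4's invalid_onion_blinding (BADONION|PERM|24).
	let _malformed: msgs::UpdateFailMalformedHTLC =
		([0u8; 32], 0x8000 | 0x4000 | 24).to_message(1, channel_id);
}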
impl<SP: Deref> Channel<SP> where
SP::Target: SignerProvider,
<SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
self.context.latest_monitor_update_id += 1;
let monitor_update = ChannelMonitorUpdate {
update_id: self.context.latest_monitor_update_id,
+ counterparty_node_id: Some(self.context.counterparty_node_id),
updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
payment_preimage: payment_preimage_arg.clone(),
}],
return UpdateFulfillFetch::DuplicateClaim {};
}
},
- &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
+ &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
+ &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
+ {
if htlc_id_arg == htlc_id {
log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
// TODO: We may actually be able to switch to a fulfill here, though it's
.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
}
+ /// Used for failing back with [`msgs::UpdateFailMalformedHTLC`]. For now, this is used when we
+ /// want to fail blinded HTLCs where we are not the intro node.
+ ///
+ /// See [`Self::queue_fail_htlc`] for more info.
+ pub fn queue_fail_malformed_htlc<L: Deref>(
+ &mut self, htlc_id_arg: u64, failure_code: u16, sha256_of_onion: [u8; 32], logger: &L
+ ) -> Result<(), ChannelError> where L::Target: Logger {
+ self.fail_htlc(htlc_id_arg, (sha256_of_onion, failure_code), true, logger)
+ .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
+ }
+
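// Illustrative call (editorial, not part of the diff), hypothetically from the
// blinded-HTLC failure path:
//
//     chan.queue_fail_malformed_htlc(
//         htlc_id, INVALID_ONION_BLINDING, sha256_of_onion, &logger
//     )?;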
/// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
/// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
/// however, fail more than once as we wait for an upstream failure to be irrevocably committed
/// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
/// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
/// [`ChannelError::Ignore`].
- fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
- -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
+ fn fail_htlc<L: Deref, E: FailHTLCContents + Clone>(
+ &mut self, htlc_id_arg: u64, err_contents: E, mut force_holding_cell: bool,
+ logger: &L
+ ) -> Result<Option<E::Message>, ChannelError> where L::Target: Logger {
if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
panic!("Was asked to fail an HTLC when channel was not in an operational state");
}
return Ok(None);
}
},
- &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
+ &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
+ &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
+ {
if htlc_id_arg == htlc_id {
debug_assert!(false, "Tried to fail an HTLC that was already failed");
return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
}
}
log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
- self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
- htlc_id: htlc_id_arg,
- err_packet,
- });
+ self.context.holding_cell_htlc_updates.push(err_contents.to_htlc_update_awaiting_ack(htlc_id_arg));
return Ok(None);
}
- log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, &self.context.channel_id());
+ log_trace!(logger, "Failing HTLC ID {} back with {} message in channel {}.", htlc_id_arg,
+ E::Message::name(), &self.context.channel_id());
{
let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
- htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
+ htlc.state = err_contents.clone().to_inbound_htlc_state();
}
- Ok(Some(msgs::UpdateFailHTLC {
- channel_id: self.context.channel_id(),
- htlc_id: htlc_id_arg,
- reason: err_packet
- }))
+ Ok(Some(err_contents.to_message(htlc_id_arg, self.context.channel_id())))
}
// Message handlers:
self.context.channel_state.clear_waiting_for_batch();
}
+ /// Unsets the existing funding information.
+ ///
+ /// This must only be used if the channel has not yet completed funding and has not been used.
+ ///
+ /// Further, the channel must be immediately shut down after this with a call to
+ /// [`ChannelContext::force_shutdown`].
+ pub fn unset_funding_info(&mut self, temporary_channel_id: ChannelId) {
+ debug_assert!(matches!(
+ self.context.channel_state, ChannelState::AwaitingChannelReady(_)
+ ));
+ self.context.channel_transaction_parameters.funding_outpoint = None;
+ self.context.channel_id = temporary_channel_id;
+ }
+
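// Editorial note: pairing `unset_funding_info` with the required
// `force_shutdown` lets a never-broadcast channel be abandoned under its
// original temporary_channel_id (for example, presumably, when funding cannot
// be completed) rather than under a funding-derived channel_id.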
/// Handles a channel_ready message from our peer. If we've already sent our channel_ready
/// and the channel is now usable (and public), this may generate an announcement_signatures to
/// reply with.
self.context.latest_monitor_update_id += 1;
let mut monitor_update = ChannelMonitorUpdate {
update_id: self.context.latest_monitor_update_id,
+ counterparty_node_id: Some(self.context.counterparty_node_id),
updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
commitment_tx: holder_commitment_tx,
htlc_outputs: htlcs_and_sigs,
let mut monitor_update = ChannelMonitorUpdate {
update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
+ counterparty_node_id: Some(self.context.counterparty_node_id),
updates: Vec::new(),
};
// the limit. In case it's less rare than I anticipate, we may want to revisit
// handling this case better and maybe fulfilling some of the HTLCs while attempting
// to rebalance channels.
- match &htlc_update {
+ let fail_htlc_res = match &htlc_update {
&HTLCUpdateAwaitingACK::AddHTLC {
amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
skimmed_fee_msat, blinding_point, ..
}
}
}
+ None
},
&HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
// If an HTLC claim was previously added to the holding cell (via
{ monitor_update } else { unreachable!() };
update_fulfill_count += 1;
monitor_update.updates.append(&mut additional_monitor_update.updates);
+ None
},
&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
- match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
- Ok(update_fail_msg_option) => {
- // If an HTLC failure was previously added to the holding cell (via
- // `queue_fail_htlc`) then generating the fail message itself must
- // not fail - we should never end up in a state where we double-fail
- // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
- // for a full revocation before failing.
- debug_assert!(update_fail_msg_option.is_some());
- update_fail_count += 1;
- },
- Err(e) => {
- if let ChannelError::Ignore(_) = e {}
- else {
- panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
- }
- }
- }
+ Some(self.fail_htlc(htlc_id, err_packet.clone(), false, logger)
+ .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
},
+ &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
+ Some(self.fail_htlc(htlc_id, (sha256_of_onion, failure_code), false, logger)
+ .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
+ }
+ };
+ if let Some(res) = fail_htlc_res {
+ match res {
+ Ok(fail_msg_opt) => {
+ // If an HTLC failure was previously added to the holding cell (via
+ // `queue_fail_{malformed_}htlc`) then generating the fail message itself must
+ // not fail - we should never end up in a state where we double-fail
+ // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
+ // for a full revocation before failing.
+ debug_assert!(fail_msg_opt.is_some());
+ update_fail_count += 1;
+ },
+ Err(ChannelError::Ignore(_)) => {},
+ Err(_) => {
+ panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
+ },
+ }
}
}
if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
self.context.latest_monitor_update_id += 1;
let mut monitor_update = ChannelMonitorUpdate {
update_id: self.context.latest_monitor_update_id,
+ counterparty_node_id: Some(self.context.counterparty_node_id),
updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
idx: self.context.cur_counterparty_commitment_transaction_number + 1,
secret: msg.per_commitment_secret,
/// Indicates that the signer may have some signatures for us, so we should retry if we're
/// blocked.
- #[allow(unused)]
+ #[cfg(async_signing)]
pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
let commitment_update = if self.context.signer_pending_commitment_update {
self.get_last_commitment_update_for_send(logger).ok()
}
update
} else {
- if !self.context.signer_pending_commitment_update {
- log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
- self.context.signer_pending_commitment_update = true;
+ #[cfg(not(async_signing))] {
+ panic!("Failed to get signature for new commitment state");
+ }
+ #[cfg(async_signing)] {
+ if !self.context.signer_pending_commitment_update {
+ log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
+ self.context.signer_pending_commitment_update = true;
+ }
+ return Err(());
}
- return Err(());
};
Ok(msgs::CommitmentUpdate {
update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
self.context.latest_monitor_update_id += 1;
let monitor_update = ChannelMonitorUpdate {
update_id: self.context.latest_monitor_update_id,
+ counterparty_node_id: Some(self.context.counterparty_node_id),
updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
scriptpubkey: self.get_closing_scriptpubkey(),
}],
if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
if last_fee == msg.fee_satoshis {
let shutdown_result = ShutdownResult {
+ closure_reason: ClosureReason::CooperativeClosure,
monitor_update: None,
dropped_outbound_htlcs: Vec::new(),
unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
channel_id: self.context.channel_id,
+ user_channel_id: self.context.user_id,
+ channel_capacity_satoshis: self.context.channel_value_satoshis,
counterparty_node_id: self.context.counterparty_node_id,
+ unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
};
let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
self.context.channel_state = ChannelState::ShutdownComplete;
.map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
let shutdown_result = ShutdownResult {
+ closure_reason: ClosureReason::CooperativeClosure,
monitor_update: None,
dropped_outbound_htlcs: Vec::new(),
unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
channel_id: self.context.channel_id,
+ user_channel_id: self.context.user_id,
+ channel_capacity_satoshis: self.context.channel_value_satoshis,
counterparty_node_id: self.context.counterparty_node_id,
+ unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
};
self.context.channel_state = ChannelState::ShutdownComplete;
self.context.update_time_counter += 1;
self.context.latest_monitor_update_id += 1;
let monitor_update = ChannelMonitorUpdate {
update_id: self.context.latest_monitor_update_id,
+ counterparty_node_id: Some(self.context.counterparty_node_id),
updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
commitment_txid: counterparty_commitment_txid,
htlc_outputs: htlcs.clone(),
self.context.latest_monitor_update_id += 1;
let monitor_update = ChannelMonitorUpdate {
update_id: self.context.latest_monitor_update_id,
+ counterparty_node_id: Some(self.context.counterparty_node_id),
updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
scriptpubkey: self.get_closing_scriptpubkey(),
}],
let funding_created = self.get_funding_created_msg(logger);
if funding_created.is_none() {
- if !self.context.signer_pending_funding {
- log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
- self.context.signer_pending_funding = true;
+ #[cfg(not(async_signing))] {
+ panic!("Failed to get signature for new funding creation");
+ }
+ #[cfg(async_signing)] {
+ if !self.context.signer_pending_funding {
+ log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
+ self.context.signer_pending_funding = true;
+ }
}
}
/// Indicates that the signer may have some signatures for us, so we should retry if we're
/// blocked.
- #[allow(unused)]
+ #[cfg(async_signing)]
pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
if self.context.signer_pending_funding && self.context.is_outbound() {
log_trace!(logger, "Signer unblocked a funding_created");
pub unfunded_context: UnfundedChannelContext,
}
+/// Fetches the [`ChannelTypeFeatures`] that will be used for a channel built from a given
+/// [`msgs::OpenChannel`].
+pub(super) fn channel_type_from_open_channel(
+ msg: &msgs::OpenChannel, their_features: &InitFeatures,
+ our_supported_features: &ChannelTypeFeatures
+) -> Result<ChannelTypeFeatures, ChannelError> {
+ if let Some(channel_type) = &msg.channel_type {
+ if channel_type.supports_any_optional_bits() {
+ return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
+ }
+
+ // We only support the channel types defined by the `ChannelManager` in
+ // `provided_channel_type_features`. The channel type must always support
+ // `static_remote_key`.
+ if !channel_type.requires_static_remote_key() {
+ return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
+ }
+ // Make sure we support all of the features behind the channel type.
+ if !channel_type.is_subset(our_supported_features) {
+ return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
+ }
+ let announced_channel = (msg.channel_flags & 1) == 1;
+ if channel_type.requires_scid_privacy() && announced_channel {
+ return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
+ }
+ Ok(channel_type.clone())
+ } else {
+ let channel_type = ChannelTypeFeatures::from_init(&their_features);
+ if channel_type != ChannelTypeFeatures::only_static_remote_key() {
+ return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
+ }
+ Ok(channel_type)
+ }
+}
+
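// Illustrative usage (editorial, not part of the diff): with the checks
// factored out, a caller such as `ChannelManager` could, hypothetically, vet
// an `open_channel` before constructing any channel state:
//
//     let channel_type = channel_type_from_open_channel(
//         &msg, &their_features, &our_supported_features
//     )?;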
impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
/// Creates a new channel from a remote side's request for one.
/// Assumes chain_hash has already been checked and corresponds with what we expect!
// First check the channel type is known, failing before we do anything else if we don't
// support this channel type.
- let channel_type = if let Some(channel_type) = &msg.channel_type {
- if channel_type.supports_any_optional_bits() {
- return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
- }
-
- // We only support the channel types defined by the `ChannelManager` in
- // `provided_channel_type_features`. The channel type must always support
- // `static_remote_key`.
- if !channel_type.requires_static_remote_key() {
- return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
- }
- // Make sure we support all of the features behind the channel type.
- if !channel_type.is_subset(our_supported_features) {
- return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
- }
- if channel_type.requires_scid_privacy() && announced_channel {
- return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
- }
- channel_type.clone()
- } else {
- let channel_type = ChannelTypeFeatures::from_init(&their_features);
- if channel_type != ChannelTypeFeatures::only_static_remote_key() {
- return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
- }
- channel_type
- };
+ let channel_type = channel_type_from_open_channel(msg, their_features, our_supported_features)?;
let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
+ // Vec of (htlc_id, failure_code, sha256_of_onion)
+ let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new();
(self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
for update in self.context.holding_cell_htlc_updates.iter() {
match update {
htlc_id.write(writer)?;
err_packet.write(writer)?;
}
+ &HTLCUpdateAwaitingACK::FailMalformedHTLC {
+ htlc_id, failure_code, sha256_of_onion
+ } => {
+ // We don't want to break downgrading by adding a new variant, so write a dummy
+ // `::FailHTLC` variant and write the real malformed error as an optional TLV.
+ malformed_htlcs.push((htlc_id, failure_code, sha256_of_onion));
+
+ let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
+ 2u8.write(writer)?;
+ htlc_id.write(writer)?;
+ dummy_err_packet.write(writer)?;
+ }
}
}
(38, self.context.is_batch_funding, option),
(39, pending_outbound_blinding_points, optional_vec),
(41, holding_cell_blinding_points, optional_vec),
+ (43, malformed_htlcs, optional_vec), // Added in 0.0.119
});
Ok(())
let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
+ let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
+
read_tlv_fields!(reader, {
(0, announcement_sigs, option),
(1, minimum_depth, option),
(38, is_batch_funding, option),
(39, pending_outbound_blinding_points_opt, optional_vec),
(41, holding_cell_blinding_points_opt, optional_vec),
+ (43, malformed_htlcs, optional_vec), // Added in 0.0.119
});
let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
}
+ if let Some(malformed_htlcs) = malformed_htlcs {
+ for (malformed_htlc_id, failure_code, sha256_of_onion) in malformed_htlcs {
+ let htlc_idx = holding_cell_htlc_updates.iter().position(|htlc| {
+ if let HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet } = htlc {
+ let matches = *htlc_id == malformed_htlc_id;
+ if matches { debug_assert!(err_packet.data.is_empty()) }
+ matches
+ } else { false }
+ }).ok_or(DecodeError::InvalidValue)?;
+ let malformed_htlc = HTLCUpdateAwaitingACK::FailMalformedHTLC {
+ htlc_id: malformed_htlc_id, failure_code, sha256_of_onion
+ };
+ let _ = core::mem::replace(&mut holding_cell_htlc_updates[htlc_idx], malformed_htlc);
+ }
+ }
+
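// Editorial note on the downgrade strategy: the write side stored each
// malformed HTLC as a dummy `FailHTLC` (variant byte 2 with an empty error
// packet) plus an entry in odd TLV 43. Older readers ignore the odd TLV and
// still decode a structurally valid holding cell; this reader swaps each dummy
// back into its `FailMalformedHTLC` form.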
Ok(Channel {
context: ChannelContext {
user_id,
use bitcoin::blockdata::transaction::{Transaction, TxOut};
use bitcoin::blockdata::opcodes;
use bitcoin::network::constants::Network;
+ use crate::ln::onion_utils::INVALID_ONION_BLINDING;
use crate::ln::{PaymentHash, PaymentPreimage};
use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
}
#[test]
- fn blinding_point_skimmed_fee_ser() {
- // Ensure that channel blinding points and skimmed fees are (de)serialized properly.
+ fn blinding_point_skimmed_fee_malformed_ser() {
+ // Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized
+ // properly.
let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
let secp_ctx = Secp256k1::new();
let seed = [42; 32];
payment_preimage: PaymentPreimage([42; 32]),
htlc_id: 0,
};
- let mut holding_cell_htlc_updates = Vec::with_capacity(10);
- for i in 0..10 {
- if i % 3 == 0 {
+ let dummy_holding_cell_failed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailHTLC {
+ htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }
+ };
+ let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC {
+ htlc_id, failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32],
+ };
+ let mut holding_cell_htlc_updates = Vec::with_capacity(12);
+ for i in 0..12 {
+ if i % 5 == 0 {
holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
- } else if i % 3 == 1 {
+ } else if i % 5 == 1 {
holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
- } else {
+ } else if i % 5 == 2 {
let mut dummy_add = dummy_holding_cell_add_htlc.clone();
if let HTLCUpdateAwaitingACK::AddHTLC {
ref mut blinding_point, ref mut skimmed_fee_msat, ..
*skimmed_fee_msat = Some(42);
} else { panic!() }
holding_cell_htlc_updates.push(dummy_add);
+ } else if i % 5 == 3 {
+ holding_cell_htlc_updates.push(dummy_holding_cell_malformed_htlc(i as u64));
+ } else {
+ holding_cell_htlc_updates.push(dummy_holding_cell_failed_htlc(i as u64));
}
}
chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();