// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

use bitcoin::blockdata::constants::ChainHash;
use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::sighash::EcdsaSighashType;
use bitcoin::consensus::encode;

use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256d;
use bitcoin::hash_types::{Txid, BlockHash};

use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
use bitcoin::secp256k1::{PublicKey, SecretKey};
use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
use bitcoin::secp256k1;

use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
use crate::ln::msgs;
use crate::ln::msgs::DecodeError;
use crate::ln::script::{self, ShutdownScript};
use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
use crate::ln::chan_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
use crate::events::ClosureReason;
use crate::routing::gossip::NodeId;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use crate::util::logger::{Logger, Record, WithContext};
use crate::util::errors::APIError;
use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
use crate::util::scid_utils::scid_from_parts;

use crate::prelude::*;
use core::{cmp, mem, fmt};
use core::convert::TryInto;
use core::ops::Deref;
#[cfg(any(test, fuzzing, debug_assertions))]
use crate::sync::Mutex;
use crate::sign::type_resolver::ChannelSignerType;

use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};

pub struct ChannelValueStat {
	pub value_to_self_msat: u64,
	pub channel_value_msat: u64,
	pub channel_reserve_msat: u64,
	pub pending_outbound_htlcs_amount_msat: u64,
	pub pending_inbound_htlcs_amount_msat: u64,
	pub holding_cell_outbound_amount_msat: u64,
	pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
	pub counterparty_dust_limit_msat: u64,
}

pub struct AvailableBalances {
	/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
	pub balance_msat: u64,
	/// Total amount available for our counterparty to send to us.
	pub inbound_capacity_msat: u64,
	/// Total amount available for us to send to our counterparty.
	pub outbound_capacity_msat: u64,
	/// The maximum value we can assign to the next outbound HTLC.
	pub next_outbound_htlc_limit_msat: u64,
	/// The minimum value we can assign to the next outbound HTLC.
	pub next_outbound_htlc_minimum_msat: u64,
}

#[derive(Debug, Clone, Copy, PartialEq)]
enum FeeUpdateState {
	// Inbound states mirroring InboundHTLCState
	RemoteAnnounced,
	AwaitingRemoteRevokeToAnnounce,
	// Note that we do not have an AwaitingAnnouncedRemoteRevoke variant here as it is universally
	// handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
	// distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
	// the fee update anywhere, we can simply consider the fee update `Committed` immediately
	// instead of setting it to AwaitingAnnouncedRemoteRevoke.

	// Outbound state can only be `LocalAnnounced` or `Committed`
	Outbound,
}

enum InboundHTLCRemovalReason {
	FailRelay(msgs::OnionErrorPacket),
	FailMalformed(([u8; 32], u16)),
	Fulfill(PaymentPreimage),
}

enum InboundHTLCState {
	/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
	/// update_add_htlc message for this HTLC.
	RemoteAnnounced(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've
	/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
	/// state (see the example below). We have not yet included this HTLC in a
	/// commitment_signed message because we are waiting on the remote's
	/// aforementioned state revocation. One reason this missing remote RAA
	/// (revoke_and_ack) blocks us from constructing a commitment_signed message
	/// is because every time we create a new "state", i.e. every time we sign a
	/// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
	/// which are provided one-at-a-time in each RAA. E.g., the last RAA they
	/// sent provided the per_commitment_point for our current commitment tx.
	/// The other reason we should not send a commitment_signed without their RAA
	/// is because their RAA serves to ACK our previous commitment_signed.
	///
	/// Here's an example of how an HTLC could come to be in this state:
	/// remote --> update_add_htlc(prev_htlc) --> local
	/// remote --> commitment_signed(prev_htlc) --> local
	/// remote <-- revoke_and_ack <-- local
	/// remote <-- commitment_signed(prev_htlc) <-- local
	/// [note that here, the remote does not respond with a RAA]
	/// remote --> update_add_htlc(this_htlc) --> local
	/// remote --> commitment_signed(prev_htlc, this_htlc) --> local
	/// Now `this_htlc` will be assigned this state. It's unable to be officially
	/// accepted, i.e. included in a commitment_signed, because we're missing the
	/// RAA that provides our next per_commitment_point. The per_commitment_point
	/// is used to derive commitment keys, which are used to construct the
	/// signatures in a commitment_signed message.
	/// Implies AwaitingRemoteRevoke.
	///
	/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
	AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
	/// We have also included this HTLC in our latest commitment_signed and are now just waiting
	/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
	/// channel (before it can then get forwarded and/or removed).
	/// Implies AwaitingRemoteRevoke.
	AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
	Committed,
	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we'll drop it.
	/// Note that we have to keep an eye on the HTLC until we've received a broadcastable
	/// commitment transaction without it as otherwise we'll have to force-close the channel to
	/// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
	/// anyway). That said, ChannelMonitor does this for us (see
	/// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
	/// our own local state before then, once we're sure that the next commitment_signed and
	/// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
	LocalRemoved(InboundHTLCRemovalReason),
}

struct InboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: InboundHTLCState,
}

#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum OutboundHTLCState {
	/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we will promote to Committed (note that they may not accept it until the next time we
	/// revoke, but we don't really care about that:
	///  * they've revoked, so worst case we can announce an old state and get our (option on)
	///    money back (though we won't), and,
	///  * we'll send them a revoke when they send a commitment_signed, and since only they're
	///    allowed to remove it, the "can only be removed once committed on both sides" requirement
	///    doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
	///    we'll never get out of sync).
	/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
	/// OutboundHTLCOutput's size just for a temporary bit
	LocalAnnounced(Box<msgs::OnionPacket>),
	Committed,
	/// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
	/// the change (though they'll need to revoke before we fail the payment).
	RemoteRemoved(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
	/// remote revoke_and_ack on a previous state before we can do so.
	AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
	/// revoke_and_ack to drop completely.
	AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
}

#[cfg_attr(test, derive(Debug, PartialEq))]
enum OutboundHTLCOutcome {
	/// LDK version 0.0.105+ will always fill in the preimage here.
	Success(Option<PaymentPreimage>),
	Failure(HTLCFailReason),
}

impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
	fn from(o: Option<HTLCFailReason>) -> Self {
		match o {
			None => OutboundHTLCOutcome::Success(None),
			Some(r) => OutboundHTLCOutcome::Failure(r)
		}
	}
}

impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
	fn into(self) -> Option<&'a HTLCFailReason> {
		match self {
			OutboundHTLCOutcome::Success(_) => None,
			OutboundHTLCOutcome::Failure(ref r) => Some(r)
		}
	}
}

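// Added sketch (not part of the original file): the two conversions above let an
// optional failure reason flow into `OutboundHTLCOutcome` and back out again.
#[cfg(test)]
#[test]
fn outbound_htlc_outcome_conversion_sketch() {
	// A missing failure reason converts to a `Success` with no preimage yet.
	let outcome = OutboundHTLCOutcome::from(None::<HTLCFailReason>);
	assert!(matches!(outcome, OutboundHTLCOutcome::Success(None)));
	// Converting a reference back yields `None` since there was no failure.
	let reason: Option<&HTLCFailReason> = (&outcome).into();
	assert!(reason.is_none());
}
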
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
struct OutboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: OutboundHTLCState,
	source: HTLCSource,
	blinding_point: Option<PublicKey>,
	skimmed_fee_msat: Option<u64>,
}

/// See AwaitingRemoteRevoke ChannelState for more info
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum HTLCUpdateAwaitingACK {
	AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
		// always outbound
		amount_msat: u64,
		cltv_expiry: u32,
		payment_hash: PaymentHash,
		source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket,
		// The extra fee we're skimming off the top of this HTLC.
		skimmed_fee_msat: Option<u64>,
		blinding_point: Option<PublicKey>,
	},
	ClaimHTLC {
		payment_preimage: PaymentPreimage,
		htlc_id: u64,
	},
	FailHTLC {
		htlc_id: u64,
		err_packet: msgs::OnionErrorPacket,
	},
	FailMalformedHTLC {
		htlc_id: u64,
		failure_code: u16,
		sha256_of_onion: [u8; 32],
	},
}

macro_rules! define_state_flags {
	($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr, $get: ident, $set: ident, $clear: ident)),+], $extra_flags: expr) => {
		#[doc = $flag_type_doc]
		#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
		struct $flag_type(u32);

		impl $flag_type {
			$(
				#[doc = $flag_doc]
				const $flag: $flag_type = $flag_type($value);
			)*

			/// All flags that apply to the specified [`ChannelState`] variant.
			#[allow(unused)]
			const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags);

			#[allow(unused)]
			fn new() -> Self { Self(0) }

			#[allow(unused)]
			fn from_u32(flags: u32) -> Result<Self, ()> {
				if flags & !Self::ALL.0 != 0 {
					Err(())
				} else {
					Ok($flag_type(flags))
				}
			}

			#[allow(unused)]
			fn is_empty(&self) -> bool { self.0 == 0 }
			#[allow(unused)]
			fn is_set(&self, flag: Self) -> bool { *self & flag == flag }
			#[allow(unused)]
			fn set(&mut self, flag: Self) { *self |= flag }
			#[allow(unused)]
			fn clear(&mut self, flag: Self) -> Self { self.0 &= !flag.0; *self }
		}

		$(
			define_state_flags!($flag_type, Self::$flag, $get, $set, $clear);
		)*

		impl core::ops::BitOr for $flag_type {
			type Output = Self;
			fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
		}
		impl core::ops::BitOrAssign for $flag_type {
			fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; }
		}
		impl core::ops::BitAnd for $flag_type {
			type Output = Self;
			fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) }
		}
		impl core::ops::BitAndAssign for $flag_type {
			fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; }
		}
	};
	($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
	};
	($flag_type: ident, $flag: expr, $get: ident, $set: ident, $clear: ident) => {
		impl $flag_type {
			#[allow(unused)]
			fn $get(&self) -> bool { self.is_set($flag_type::new() | $flag) }
			#[allow(unused)]
			fn $set(&mut self) { self.set($flag_type::new() | $flag) }
			#[allow(unused)]
			fn $clear(&mut self) -> Self { self.clear($flag_type::new() | $flag) }
		}
	};
	($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);

		define_state_flags!($flag_type, FundedStateFlags::PEER_DISCONNECTED,
			is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected);
		define_state_flags!($flag_type, FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS,
			is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress);
		define_state_flags!($flag_type, FundedStateFlags::REMOTE_SHUTDOWN_SENT,
			is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent);
		define_state_flags!($flag_type, FundedStateFlags::LOCAL_SHUTDOWN_SENT,
			is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent);

		impl core::ops::BitOr<FundedStateFlags> for $flag_type {
			type Output = Self;
			fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
		}
		impl core::ops::BitOrAssign<FundedStateFlags> for $flag_type {
			fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; }
		}
		impl core::ops::BitAnd<FundedStateFlags> for $flag_type {
			type Output = Self;
			fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) }
		}
		impl core::ops::BitAndAssign<FundedStateFlags> for $flag_type {
			fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; }
		}
		impl PartialEq<FundedStateFlags> for $flag_type {
			fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 }
		}
		impl From<FundedStateFlags> for $flag_type {
			fn from(flags: FundedStateFlags) -> Self { Self(flags.0) }
		}
	};
}

/// We declare all the states/flags here together to help determine which bits are still available
/// to choose.
mod state_flags {
	pub const OUR_INIT_SENT: u32 = 1 << 0;
	pub const THEIR_INIT_SENT: u32 = 1 << 1;
	pub const FUNDING_NEGOTIATED: u32 = 1 << 2;
	pub const AWAITING_CHANNEL_READY: u32 = 1 << 3;
	pub const THEIR_CHANNEL_READY: u32 = 1 << 4;
	pub const OUR_CHANNEL_READY: u32 = 1 << 5;
	pub const CHANNEL_READY: u32 = 1 << 6;
	pub const PEER_DISCONNECTED: u32 = 1 << 7;
	pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8;
	pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9;
	pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10;
	pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11;
	pub const SHUTDOWN_COMPLETE: u32 = 1 << 12;
	pub const WAITING_FOR_BATCH: u32 = 1 << 13;
}

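// Added sketch (not part of the original file): each constant above occupies a
// distinct bit, which is what lets a `ChannelState` variant and its flags share
// a single `u32` when (de)serialized.
#[cfg(test)]
#[test]
fn state_flag_bits_are_disjoint_sketch() {
	let bits = [
		state_flags::OUR_INIT_SENT, state_flags::THEIR_INIT_SENT,
		state_flags::FUNDING_NEGOTIATED, state_flags::AWAITING_CHANNEL_READY,
		state_flags::THEIR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY,
		state_flags::CHANNEL_READY, state_flags::PEER_DISCONNECTED,
		state_flags::MONITOR_UPDATE_IN_PROGRESS, state_flags::AWAITING_REMOTE_REVOKE,
		state_flags::REMOTE_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT,
		state_flags::SHUTDOWN_COMPLETE, state_flags::WAITING_FOR_BATCH,
	];
	let mut seen = 0u32;
	for &bit in bits.iter() {
		assert_eq!(bit & seen, 0, "state/flag bits must not overlap");
		seen |= bit;
	}
}
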
394 "Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
396 ("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
397 until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED,
398 is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected),
399 ("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
400 somewhere and we should pause sending any outbound messages until they've managed to \
401 complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS,
402 is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress),
403 ("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
404 any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
405 message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT,
406 is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent),
407 ("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
408 the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT,
409 is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent)
414 "Flags that only apply to [`ChannelState::NegotiatingFunding`].",
415 NegotiatingFundingFlags, [
416 ("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
417 OUR_INIT_SENT, state_flags::OUR_INIT_SENT, is_our_init_sent, set_our_init_sent, clear_our_init_sent),
418 ("Indicates we have received their `open_channel`/`accept_channel` message.",
419 THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT, is_their_init_sent, set_their_init_sent, clear_their_init_sent)
424 "Flags that only apply to [`ChannelState::AwaitingChannelReady`].",
425 FUNDED_STATE, AwaitingChannelReadyFlags, [
426 ("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
427 `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
428 THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY,
429 is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready),
430 ("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
431 `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
432 OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY,
433 is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready),
434 ("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
435 is being held until all channels in the batch have received `funding_signed` and have \
436 their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH,
437 is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch)
442 "Flags that only apply to [`ChannelState::ChannelReady`].",
443 FUNDED_STATE, ChannelReadyFlags, [
444 ("Indicates that we have sent a `commitment_signed` but are awaiting the responding \
445 `revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
446 messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
447 implicit ACK, so instead we have to hold them away temporarily to be sent later.",
448 AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE,
449 is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke)
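// Added sketch (not part of the original file): how the generated flag API
// composes. A funded-state flag type carries its own flags plus the shared
// `FundedStateFlags`, via the `BitOrAssign<FundedStateFlags>` impl generated
// by the `FUNDED_STATE` arm of `define_state_flags!`.
#[cfg(test)]
#[test]
fn funded_state_flag_api_sketch() {
	let mut flags = AwaitingChannelReadyFlags::new();
	assert!(flags.is_empty());
	flags.set_their_channel_ready();
	flags |= FundedStateFlags::PEER_DISCONNECTED;
	assert!(flags.is_their_channel_ready() && flags.is_peer_disconnected());
	let _ = flags.clear_peer_disconnected();
	assert!(!flags.is_peer_disconnected());
}
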
// Note that the order of this enum is implicitly defined by where each variant is placed. Take this
// into account when introducing new states and update `test_channel_state_order` accordingly.
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
enum ChannelState {
	/// We are negotiating the parameters required for the channel prior to funding it.
	NegotiatingFunding(NegotiatingFundingFlags),
	/// We have sent `funding_created` and are awaiting a `funding_signed` to advance to
	/// `AwaitingChannelReady`. Note that this is nonsense for an inbound channel as we immediately generate
	/// `funding_signed` upon receipt of `funding_created`, so simply skip this state.
	FundingNegotiated,
	/// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the
	/// funding transaction to confirm.
	AwaitingChannelReady(AwaitingChannelReadyFlags),
	/// Both we and our counterparty consider the funding transaction confirmed and the channel is
	/// now operational.
	ChannelReady(ChannelReadyFlags),
	/// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager`
	/// is about to drop us, but we store this anyway.
	ShutdownComplete,
}

macro_rules! impl_state_flag {
	($get: ident, $set: ident, $clear: ident, [$($state: ident),+]) => {
		#[allow(unused)]
		fn $get(&self) -> bool {
			match self {
				$(
					ChannelState::$state(flags) => flags.$get(),
				)*
				_ => false,
			}
		}
		#[allow(unused)]
		fn $set(&mut self) {
			match self {
				$(
					ChannelState::$state(flags) => flags.$set(),
				)*
				_ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
			}
		}
		#[allow(unused)]
		fn $clear(&mut self) {
			match self {
				$(
					ChannelState::$state(flags) => { let _ = flags.$clear(); },
				)*
				_ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
			}
		}
	};
	($get: ident, $set: ident, $clear: ident, FUNDED_STATES) => {
		impl_state_flag!($get, $set, $clear, [AwaitingChannelReady, ChannelReady]);
	};
	($get: ident, $set: ident, $clear: ident, $state: ident) => {
		impl_state_flag!($get, $set, $clear, [$state]);
	};
}

impl ChannelState {
	fn from_u32(state: u32) -> Result<Self, ()> {
		match state {
			state_flags::FUNDING_NEGOTIATED => Ok(ChannelState::FundingNegotiated),
			state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete),
			val => {
				if val & state_flags::AWAITING_CHANNEL_READY == state_flags::AWAITING_CHANNEL_READY {
					AwaitingChannelReadyFlags::from_u32(val & !state_flags::AWAITING_CHANNEL_READY)
						.map(|flags| ChannelState::AwaitingChannelReady(flags))
				} else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY {
					ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY)
						.map(|flags| ChannelState::ChannelReady(flags))
				} else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) {
					Ok(ChannelState::NegotiatingFunding(flags))
				} else {
					Err(())
				}
			},
		}
	}

	fn to_u32(&self) -> u32 {
		match self {
			ChannelState::NegotiatingFunding(flags) => flags.0,
			ChannelState::FundingNegotiated => state_flags::FUNDING_NEGOTIATED,
			ChannelState::AwaitingChannelReady(flags) => state_flags::AWAITING_CHANNEL_READY | flags.0,
			ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0,
			ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE,
		}
	}

	fn is_pre_funded_state(&self) -> bool {
		matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingNegotiated)
	}

	fn is_both_sides_shutdown(&self) -> bool {
		self.is_local_shutdown_sent() && self.is_remote_shutdown_sent()
	}

	fn with_funded_state_flags_mask(&self) -> FundedStateFlags {
		match self {
			ChannelState::AwaitingChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			_ => FundedStateFlags::new(),
		}
	}

	fn can_generate_new_commitment(&self) -> bool {
		match self {
			ChannelState::ChannelReady(flags) =>
				!flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) &&
					!flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) &&
					!flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
			_ => {
				debug_assert!(false, "Can only generate new commitment within ChannelReady");
				false
			},
		}
	}

	impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected, FUNDED_STATES);
	impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress, FUNDED_STATES);
	impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent, FUNDED_STATES);
	impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent, FUNDED_STATES);
	impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready, AwaitingChannelReady);
	impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready, AwaitingChannelReady);
	impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch, AwaitingChannelReady);
	impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke, ChannelReady);
}

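// Added sketch (not part of the original file): `to_u32`/`from_u32` round-trip
// a funded state together with its per-state flags.
#[cfg(test)]
#[test]
fn channel_state_u32_round_trip_sketch() {
	let state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::OUR_CHANNEL_READY);
	assert_eq!(ChannelState::from_u32(state.to_u32()), Ok(state));
}
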
pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

pub const DEFAULT_MAX_HTLCS: u16 = 50;

pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
	const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
	const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
	if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
}

#[cfg(not(test))]
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
#[cfg(test)]
pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;

pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;

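// Added sketch (illustrative, not part of the original file): per BOLT 3, the
// commitment transaction fee is `weight * feerate_per_kw / 1000`, with the base
// weight above plus 172 weight units per non-dust HTLC. The feerate and HTLC
// count here are arbitrary example values.
#[cfg(test)]
#[test]
fn commitment_fee_arithmetic_sketch() {
	let feerate_per_kw: u64 = 253;
	let num_htlcs: u64 = 3;
	let weight = commitment_tx_base_weight(&ChannelTypeFeatures::only_static_remote_key())
		+ num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC;
	// 253 sat/kW * (724 + 3 * 172) weight / 1000 = 313 sat.
	assert_eq!(feerate_per_kw * weight / 1000, 313);
}
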
/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;

/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;

/// Total bitcoin supply in satoshis.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;

/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;

/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;

/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;

// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;

/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
pub(super) enum ChannelError {
	Ignore(String),
	Warn(String),
	Close(String),
}

impl fmt::Debug for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
			&ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
			&ChannelError::Close(ref e) => write!(f, "Close : {}", e),
		}
	}
}

impl fmt::Display for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "{}", e),
			&ChannelError::Warn(ref e) => write!(f, "{}", e),
			&ChannelError::Close(ref e) => write!(f, "{}", e),
		}
	}
}

pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
	pub logger: &'a L,
	pub peer_id: Option<PublicKey>,
	pub channel_id: Option<ChannelId>,
}

impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
	fn log(&self, mut record: Record) {
		record.peer_id = self.peer_id;
		record.channel_id = self.channel_id;
		self.logger.log(record)
	}
}

impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
where L::Target: Logger {
	pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
	where S::Target: SignerProvider
	{
		WithChannelContext {
			logger,
			peer_id: Some(context.counterparty_node_id),
			channel_id: Some(context.channel_id),
		}
	}
}

macro_rules! secp_check {
	($res: expr, $err: expr) => {
		match $res {
			Ok(thing) => thing,
			Err(_) => return Err(ChannelError::Close($err)),
		}
	};
}

700 /// The "channel disabled" bit in channel_update must be set based on whether we are connected to
701 /// our counterparty or not. However, we don't want to announce updates right away to avoid
702 /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
703 /// our channel_update message and track the current state here.
704 /// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
705 #[derive(Clone, Copy, PartialEq)]
706 pub(super) enum ChannelUpdateStatus {
707 /// We've announced the channel as enabled and are connected to our peer.
709 /// Our channel is no longer live, but we haven't announced the channel as disabled yet.
711 /// Our channel is live again, but we haven't announced the channel as enabled yet.
713 /// We've announced the channel as disabled.
/// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
#[derive(Debug, PartialEq)]
pub enum AnnouncementSigsState {
	/// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
	/// we sent the last `AnnouncementSignatures`.
	NotSent,
	/// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
	/// This state never appears on disk - instead we write `NotSent`.
	MessageSent,
	/// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
	/// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
	/// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
	/// they send back a `RevokeAndACK`.
	/// This state never appears on disk - instead we write `NotSent`.
	Committed,
	/// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
	/// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
	PeerReceived,
}

/// An enum indicating whether the local or remote side offered a given HTLC.
enum HTLCInitiator {
	LocalOffered,
	RemoteOffered,
}

/// A struct gathering stats on pending HTLCs, either inbound or outbound side.
struct HTLCStats {
	pending_htlcs: u32,
	pending_htlcs_value_msat: u64,
	on_counterparty_tx_dust_exposure_msat: u64,
	on_holder_tx_dust_exposure_msat: u64,
	holding_cell_msat: u64,
	on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
}

/// A struct gathering stats on a commitment transaction, either local or remote.
struct CommitmentStats<'a> {
	tx: CommitmentTransaction, // the transaction info
	feerate_per_kw: u32, // the feerate included to build the transaction
	total_fee_sat: u64, // the total fee included in the transaction
	num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
	htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
	local_balance_msat: u64, // local balance before fees *not* considering dust limits
	remote_balance_msat: u64, // remote balance before fees *not* considering dust limits
	outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
	inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
}

/// Used when calculating whether we or the remote can afford an additional HTLC.
struct HTLCCandidate {
	amount_msat: u64,
	origin: HTLCInitiator,
}

impl HTLCCandidate {
	fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
		Self { amount_msat, origin }
	}
}

/// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
/// description
enum UpdateFulfillFetch {
	NewClaim {
		monitor_update: ChannelMonitorUpdate,
		htlc_value_msat: u64,
		msg: Option<msgs::UpdateFulfillHTLC>,
	},
	DuplicateClaim {},
}

/// The return type of get_update_fulfill_htlc_and_commit.
pub enum UpdateFulfillCommitFetch {
	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
	/// previously placed in the holding cell (and has since been removed).
	NewClaim {
		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
		monitor_update: ChannelMonitorUpdate,
		/// The value of the HTLC which was claimed, in msat.
		htlc_value_msat: u64,
	},
	/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
	/// or has been forgotten (presumably previously claimed).
	DuplicateClaim {},
}

/// The return value of `monitor_updating_restored`
pub(super) struct MonitorRestoreUpdates {
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
	pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	pub finalized_claimed_htlcs: Vec<HTLCSource>,
	pub funding_broadcastable: Option<Transaction>,
	pub channel_ready: Option<msgs::ChannelReady>,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
}

/// The return value of `signer_maybe_unblocked`
pub(super) struct SignerResumeUpdates {
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub funding_signed: Option<msgs::FundingSigned>,
	pub channel_ready: Option<msgs::ChannelReady>,
}

/// The return value of `channel_reestablish`
pub(super) struct ReestablishResponses {
	pub channel_ready: Option<msgs::ChannelReady>,
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
	pub shutdown_msg: Option<msgs::Shutdown>,
}

/// The result of a shutdown that should be handled.
pub(crate) struct ShutdownResult {
	pub(crate) closure_reason: ClosureReason,
	/// A channel monitor update to apply.
	pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
	/// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
	pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	/// An unbroadcasted batch funding transaction id. The closure of this channel should be
	/// propagated to the remainder of the batch.
	pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
	pub(crate) channel_id: ChannelId,
	pub(crate) user_channel_id: u128,
	pub(crate) channel_capacity_satoshis: u64,
	pub(crate) counterparty_node_id: PublicKey,
	pub(crate) unbroadcasted_funding_tx: Option<Transaction>,
}

/// If the majority of the channel's funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
/// balance but the fundee is unable to send a payment as the increase in fee more than drains
/// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
/// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
/// by this multiple without hitting this case, before sending.
/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
/// HTLCs for days we may need this to suffice for feerate increases across days, but that may
/// leave the channel less usable as we hold a bigger reserve.
#[cfg(any(fuzzing, test))]
pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
#[cfg(not(any(fuzzing, test)))]
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;

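// Added sketch (illustrative, not part of the original file): with the buffer
// multiple of 2, an outbound HTLC is only considered affordable if the
// commitment fee remains payable after the feerate doubles. The feerate here is
// an arbitrary example value.
#[cfg(test)]
#[test]
fn fee_spike_buffer_arithmetic_sketch() {
	let feerate_per_kw: u64 = 1_000;
	let weight = commitment_tx_base_weight(&ChannelTypeFeatures::only_static_remote_key())
		+ COMMITMENT_TX_WEIGHT_PER_HTLC; // one additional non-dust HTLC
	let buffered_fee_sat = feerate_per_kw * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * weight / 1000;
	// Twice the fee of a 724 + 172 weight commitment at 1000 sat/kW.
	assert_eq!(buffered_fee_sat, 2 * (724 + 172));
}
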
/// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
/// channel creation on an inbound channel, we simply force-close and move on.
/// This constant is the one suggested in BOLT 2.
pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;

/// In case of a concurrent update_add_htlc proposed by our counterparty, we might
/// not have enough balance value remaining to cover the onchain cost of this new
/// HTLC weight. If this happens, our counterparty fails the reception of our
/// commitment_signed including this new HTLC due to infringement on the channel
/// reserve.
/// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
/// size 2. However, if the number of concurrent update_add_htlc is higher, this still
/// leads to a channel force-close. Ultimately, this is an issue coming from the
/// design of LN state machines, allowing asynchronous updates.
pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;

/// When a channel is opened, we check that the funding amount is enough to pay for relevant
/// commitment transaction fees, with at least this many HTLCs present on the commitment
/// transaction (not counting the value of the HTLCs themselves).
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;

/// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
/// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
/// ChannelUpdate prompted by the config update. This value was determined as follows:
///
///   * The expected interval between ticks (1 minute).
///   * The average convergence delay of updates across the network, i.e., ~300 seconds on average
///     for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
///   * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;

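// Added sketch: the arithmetic behind the constant above, per the list in its
// doc comment.
#[cfg(test)]
#[test]
fn expire_prev_config_ticks_derivation_sketch() {
	let convergence_delay_secs = 300; // average network-wide update convergence
	let tick_interval_secs = 60; // expected interval between timer ticks
	assert_eq!(EXPIRE_PREV_CONFIG_TICKS, convergence_delay_secs / tick_interval_secs);
}
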
/// The number of ticks that may elapse while we're waiting for a response to a
/// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
/// them.
///
/// See [`ChannelContext::sent_message_awaiting_response`] for more information.
pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;

/// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
/// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
/// exceeding this age limit will be force-closed and purged from memory.
pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;

/// Number of blocks needed for an output from a coinbase transaction to be spendable.
pub(crate) const COINBASE_MATURITY: u32 = 100;

struct PendingChannelMonitorUpdate {
	update: ChannelMonitorUpdate,
}

impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
	(0, update, required),
});

/// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
/// its variants containing an appropriate channel struct.
pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
	UnfundedOutboundV1(OutboundV1Channel<SP>),
	UnfundedInboundV1(InboundV1Channel<SP>),
	Funded(Channel<SP>),
}

impl<'a, SP: Deref> ChannelPhase<SP> where
	SP::Target: SignerProvider,
	<SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
{
	pub fn context(&'a self) -> &'a ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(chan) => &chan.context,
			ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
			ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
		}
	}

	pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
		}
	}
}

/// Contains all state common to unfunded inbound/outbound channels.
pub(super) struct UnfundedChannelContext {
	/// A counter tracking how many ticks have elapsed since this unfunded channel was
	/// created. If the peer has yet to respond after this unfunded channel reaches
	/// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, it will be force-closed and purged from memory.
	///
	/// This is so that we don't keep channels around that haven't progressed to a funded state
	/// in a timely manner.
	unfunded_channel_age_ticks: usize,
}

impl UnfundedChannelContext {
	/// Determines whether we should force-close and purge this unfunded channel from memory due to it
	/// having reached the unfunded channel age limit.
	///
	/// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
	pub fn should_expire_unfunded_channel(&mut self) -> bool {
		self.unfunded_channel_age_ticks += 1;
		self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
	}
}

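// Added sketch (not part of the original file): `should_expire_unfunded_channel`
// both advances the tick counter and reports expiry, so a single call per timer
// tick is all the caller needs.
#[cfg(test)]
#[test]
fn unfunded_channel_age_sketch() {
	let mut ctx = UnfundedChannelContext { unfunded_channel_age_ticks: 0 };
	for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
		assert!(!ctx.should_expire_unfunded_channel());
	}
	// The 60th tick crosses the age limit.
	assert!(ctx.should_expire_unfunded_channel());
}
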
/// Contains everything about the channel including state, and various flags.
pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
	config: LegacyChannelConfig,

	// Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
	// constructed using it. The second element in the tuple corresponds to the number of ticks that
	// have elapsed since the update occurred.
	prev_config: Option<(ChannelConfig, usize)>,

	inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,

	user_id: u128,

	/// The current channel ID.
	channel_id: ChannelId,
	/// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
	/// Will be `None` for channels created prior to 0.0.115.
	temporary_channel_id: Option<ChannelId>,
	channel_state: ChannelState,

	// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
	// our peer. However, we want to make sure they received it, or else rebroadcast it when we
	// next connect.
	// We do so here, see `AnnouncementSigsSent` for more details on the state(s).
	// Note that a number of our tests were written prior to the behavior here which retransmits
	// AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
	// tests.
	#[cfg(any(test, feature = "_test_utils"))]
	pub(crate) announcement_sigs_state: AnnouncementSigsState,
	#[cfg(not(any(test, feature = "_test_utils")))]
	announcement_sigs_state: AnnouncementSigsState,

	secp_ctx: Secp256k1<secp256k1::All>,
	channel_value_satoshis: u64,

	latest_monitor_update_id: u64,

	holder_signer: ChannelSignerType<SP>,
	shutdown_scriptpubkey: Option<ShutdownScript>,
	destination_script: ScriptBuf,

	// Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
	// generation start at 0 and count up...this simplifies some parts of implementation at the
	// cost of others, but should really just be changed.

	cur_holder_commitment_transaction_number: u64,
	cur_counterparty_commitment_transaction_number: u64,
	value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
	pending_inbound_htlcs: Vec<InboundHTLCOutput>,
	pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
	holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,

	/// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
	/// need to ensure we resend them in the order we originally generated them. Note that because
	/// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
	/// sufficient to simply set this to the opposite of any message we are generating as we
	/// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
	/// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
	/// send it first.
	resend_order: RAACommitmentOrder,

	monitor_pending_channel_ready: bool,
	monitor_pending_revoke_and_ack: bool,
	monitor_pending_commitment_signed: bool,

	// TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
	// responsible for some of the HTLCs here or not - we don't know whether the update in question
	// completed or not. We currently ignore these fields entirely when force-closing a channel,
	// but need to handle this somehow or we run the risk of losing HTLCs!
	monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
	monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	monitor_pending_finalized_fulfills: Vec<HTLCSource>,

	/// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
	/// but our signer (initially) refused to give us a signature, we should retry at some point in
	/// the future when the signer indicates it may have a signature for us.
	///
	/// This flag is set in such a case. Note that we don't need to persist this as we'll end up
	/// setting it again as a side-effect of [`Channel::channel_reestablish`].
	signer_pending_commitment_update: bool,
	/// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
	/// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
	/// outbound or inbound.
	signer_pending_funding: bool,

	// pending_update_fee is filled when sending and receiving update_fee.
	//
	// Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
	// or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
	// generating new commitment transactions with exactly the same criteria as inbound/outbound
	// HTLCs with similar state.
	pending_update_fee: Option<(u32, FeeUpdateState)>,
	// If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
	// it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
	// `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
	// `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
	// further `send_update_fee` calls, dropping the previous holding cell update entirely.
	holding_cell_update_fee: Option<u32>,
	next_holder_htlc_id: u64,
	next_counterparty_htlc_id: u64,
	feerate_per_kw: u32,

	/// The timestamp set on our latest `channel_update` message for this channel. It is updated
	/// when the channel is updated in ways which may impact the `channel_update` message or when a
	/// new block is received, ensuring it's always at least moderately close to the current real
	/// time.
	update_time_counter: u32,

	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a locally-generated commitment transaction
	holder_max_commitment_tx_output: Mutex<(u64, u64)>,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a remote-generated commitment transaction
	counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,

	last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
	target_closing_feerate_sats_per_kw: Option<u32>,

	/// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
	/// update, we need to delay processing it until later. We do that here by simply storing the
	/// closing_signed message and handling it in `maybe_propose_closing_signed`.
	pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,

	/// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
	/// transaction. These are set once we reach `closing_negotiation_ready`.
	#[cfg(test)]
	pub(crate) closing_fee_limits: Option<(u64, u64)>,
	#[cfg(not(test))]
	closing_fee_limits: Option<(u64, u64)>,

	/// If we remove an HTLC (or fee update), commit, and receive our counterparty's
	/// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
	/// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
	/// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
	/// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
	///
	/// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
	/// until we see a `commitment_signed` before doing so.
	///
	/// We don't bother to persist this - we anticipate this state won't last longer than a few
	/// milliseconds, so any accidental force-closes here should be exceedingly rare.
	expecting_peer_commitment_signed: bool,

	/// The hash of the block in which the funding transaction was included.
	funding_tx_confirmed_in: Option<BlockHash>,
	funding_tx_confirmation_height: u32,
	short_channel_id: Option<u64>,
	/// Either the height at which this channel was created or the height at which it was last
	/// serialized if it was serialized by versions prior to 0.0.103.
	/// We use this to close if funding is never broadcasted.
	channel_creation_height: u32,

	counterparty_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) holder_dust_limit_satoshis: u64,
	#[cfg(not(test))]
	holder_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	counterparty_max_htlc_value_in_flight_msat: u64,

	#[cfg(test)]
	pub(super) holder_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	holder_max_htlc_value_in_flight_msat: u64,

	/// minimum channel reserve for self to maintain - set by them.
	counterparty_selected_channel_reserve_satoshis: Option<u64>,

	#[cfg(test)]
	pub(super) holder_selected_channel_reserve_satoshis: u64,
	#[cfg(not(test))]
	holder_selected_channel_reserve_satoshis: u64,

	counterparty_htlc_minimum_msat: u64,
	holder_htlc_minimum_msat: u64,
	#[cfg(test)]
	pub counterparty_max_accepted_htlcs: u16,
	#[cfg(not(test))]
	counterparty_max_accepted_htlcs: u16,
	holder_max_accepted_htlcs: u16,
	minimum_depth: Option<u32>,

	counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,

	pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
	funding_transaction: Option<Transaction>,
	is_batch_funding: Option<()>,

	counterparty_cur_commitment_point: Option<PublicKey>,
	counterparty_prev_commitment_point: Option<PublicKey>,
	counterparty_node_id: PublicKey,

	counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,

	commitment_secrets: CounterpartyCommitmentSecrets,

	channel_update_status: ChannelUpdateStatus,
	/// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
	/// not complete within a single timer tick (one minute), we should force-close the channel.
	/// This prevents us from keeping unusable channels around forever if our counterparty wishes
	/// to delay the closing negotiation indefinitely.
	/// Note that this field is reset to false on deserialization to give us a chance to connect to
	/// our peer and start the closing_signed negotiation fresh.
	closing_signed_in_flight: bool,

	/// Our counterparty's channel_announcement signatures provided in announcement_signatures.
	/// This can be used to rebroadcast the channel_announcement message later.
	announcement_sigs: Option<(Signature, Signature)>,

	// We save these values so we can make sure `next_local_commit_tx_fee_msat` and
	// `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
	// be, by comparing the cached values to the fee of the transaction generated by
	// `build_commitment_transaction`.
	#[cfg(any(test, fuzzing))]
	next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
	#[cfg(any(test, fuzzing))]
	next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,

	/// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
	/// they will not send a channel_reestablish until the channel locks in. Then, they will send a
	/// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
	/// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
	/// message until we receive a channel_reestablish.
	///
	/// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
	pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,

	/// An option set when we wish to track how many ticks have elapsed while waiting for a response
	/// from our counterparty after sending a message. If the peer has yet to respond after reaching
	/// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
	/// unblock the state machine.
	///
	/// This behavior is mostly motivated by an lnd bug in which we don't receive a message we expect
	/// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
	/// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
	///
	/// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
	/// [`msgs::RevokeAndACK`] message from the counterparty.
	sent_message_awaiting_response: Option<usize>,

	#[cfg(any(test, fuzzing))]
	// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
	// corresponding HTLC on the inbound path. If, then, the outbound path channel is
	// disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
	// messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
	// is fine, but as a sanity check in our failure to generate the second claim, we check here
	// that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
	historical_inbound_htlc_fulfills: HashSet<u64>,

	/// This channel's type, as negotiated during channel open
	channel_type: ChannelTypeFeatures,

	// Our counterparty can offer us SCID aliases which they will map to this channel when routing
	// outbound payments. These can be used in invoice route hints to avoid explicitly revealing
	// the channel's funding UTXO.
	//
	// We also use this when sending our peer a channel_update that isn't to be broadcasted
	// publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
	// associated channel mapping.
	//
	// We only bother storing the most recent SCID alias at any time, though our counterparty has
	// to store all of them.
	latest_inbound_scid_alias: Option<u64>,

	// We always offer our counterparty a static SCID alias, which we recognize as for this channel
	// if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
	// don't currently support node id aliases and eventually privacy should be provided with
	// blinded paths instead of simple scid+node_id aliases.
	outbound_scid_alias: u64,

	// We track whether we already emitted a `ChannelPending` event.
	channel_pending_event_emitted: bool,

	// We track whether we already emitted a `ChannelReady` event.
	channel_ready_event_emitted: bool,

	/// The unique identifier used to re-derive the private key material for the channel through
	/// [`SignerProvider::derive_channel_signer`].
	channel_keys_id: [u8; 32],

	/// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
	/// store it here and only release it to the `ChannelManager` once it asks for it.
	blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
}

1267 impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
1268 /// Allowed in any state (including after shutdown)
1269 pub fn get_update_time_counter(&self) -> u32 {
1270 self.update_time_counter
1273 pub fn get_latest_monitor_update_id(&self) -> u64 {
1274 self.latest_monitor_update_id
1277 pub fn should_announce(&self) -> bool {
1278 self.config.announced_channel
1281 pub fn is_outbound(&self) -> bool {
1282 self.channel_transaction_parameters.is_outbound_from_holder
1285 /// Gets the fee we'd want to charge for adding an HTLC output to this Channel
1286 /// Allowed in any state (including after shutdown)
1287 pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
1288 self.config.options.forwarding_fee_base_msat
1291 /// Returns true if we've ever received a message from the remote end for this Channel
1292 pub fn have_received_message(&self) -> bool {
1293 self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
1296 /// Returns true if this channel is fully established and not known to be closing.
1297 /// Allowed in any state (including after shutdown)
1298 pub fn is_usable(&self) -> bool {
1299 matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
1300 !self.channel_state.is_local_shutdown_sent() &&
1301 !self.channel_state.is_remote_shutdown_sent() &&
1302 !self.monitor_pending_channel_ready
/// Returns the [`ChannelShutdownState`] of the channel, i.e. its progress through the various stages of shutdown.
1306 pub fn shutdown_state(&self) -> ChannelShutdownState {
1307 match self.channel_state {
1308 ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
1309 if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
1310 ChannelShutdownState::ShutdownInitiated
1311 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
1312 ChannelShutdownState::ResolvingHTLCs
1313 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
1314 ChannelShutdownState::NegotiatingClosingFee
1316 ChannelShutdownState::NotShuttingDown
1318 ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
1319 _ => ChannelShutdownState::NotShuttingDown,
1323 fn closing_negotiation_ready(&self) -> bool {
1324 let is_ready_to_close = match self.channel_state {
1325 ChannelState::AwaitingChannelReady(flags) =>
1326 flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1327 ChannelState::ChannelReady(flags) =>
1328 flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1331 self.pending_inbound_htlcs.is_empty() &&
1332 self.pending_outbound_htlcs.is_empty() &&
1333 self.pending_update_fee.is_none() &&
/// Returns true if this channel is currently available for use. This is a stricter check than
/// is_usable() as it also considers things like the channel being temporarily disabled.
1339 /// Allowed in any state (including after shutdown)
1340 pub fn is_live(&self) -> bool {
1341 self.is_usable() && !self.channel_state.is_peer_disconnected()
1344 // Public utilities:
1346 pub fn channel_id(&self) -> ChannelId {
1350 // Return the `temporary_channel_id` used during channel establishment.
1352 // Will return `None` for channels created prior to LDK version 0.0.115.
1353 pub fn temporary_channel_id(&self) -> Option<ChannelId> {
1354 self.temporary_channel_id
1357 pub fn minimum_depth(&self) -> Option<u32> {
1361 /// Gets the "user_id" value passed into the construction of this channel. It has no special
1362 /// meaning and exists only to allow users to have a persistent identifier of a channel.
1363 pub fn get_user_id(&self) -> u128 {
1367 /// Gets the channel's type
1368 pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
1372 /// Gets the channel's `short_channel_id`.
1374 /// Will return `None` if the channel hasn't been confirmed yet.
1375 pub fn get_short_channel_id(&self) -> Option<u64> {
1376 self.short_channel_id
1379 /// Allowed in any state (including after shutdown)
1380 pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
1381 self.latest_inbound_scid_alias
1384 /// Allowed in any state (including after shutdown)
1385 pub fn outbound_scid_alias(&self) -> u64 {
1386 self.outbound_scid_alias
1389 /// Returns the holder signer for this channel.
1391 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
1392 return &self.holder_signer
/// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
/// indicating we were written by LDK prior to 0.0.106 (which did not set outbound SCID
/// aliases), or prior to any channel actions during `Channel` initialization.
1398 pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
1399 debug_assert_eq!(self.outbound_scid_alias, 0);
1400 self.outbound_scid_alias = outbound_scid_alias;
1403 /// Returns the funding_txo we either got from our peer, or were given by
1404 /// get_funding_created.
1405 pub fn get_funding_txo(&self) -> Option<OutPoint> {
1406 self.channel_transaction_parameters.funding_outpoint
/// Returns the height at which our funding transaction was confirmed.
1410 pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
1411 let conf_height = self.funding_tx_confirmation_height;
1412 if conf_height > 0 {
1419 /// Returns the block hash in which our funding transaction was confirmed.
1420 pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
1421 self.funding_tx_confirmed_in
1424 /// Returns the current number of confirmations on the funding transaction.
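///
/// For example (illustrative): a funding transaction confirmed at the current chain height
/// has exactly 1 confirmation, while a transaction that is unconfirmed (or was reorged out)
/// reports 0.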
1425 pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
1426 if self.funding_tx_confirmation_height == 0 {
1427 // We either haven't seen any confirmation yet, or observed a reorg.
1431 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
1434 fn get_holder_selected_contest_delay(&self) -> u16 {
1435 self.channel_transaction_parameters.holder_selected_contest_delay
1438 fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
1439 &self.channel_transaction_parameters.holder_pubkeys
1442 pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
1443 self.channel_transaction_parameters.counterparty_parameters
1444 .as_ref().map(|params| params.selected_contest_delay)
1447 fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
1448 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
1451 /// Allowed in any state (including after shutdown)
1452 pub fn get_counterparty_node_id(&self) -> PublicKey {
1453 self.counterparty_node_id
1456 /// Allowed in any state (including after shutdown)
1457 pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
1458 self.holder_htlc_minimum_msat
/// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
1462 pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
1463 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
1466 /// Allowed in any state (including after shutdown)
1467 pub fn get_announced_htlc_max_msat(&self) -> u64 {
// Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
// to use the full capacity. This is an effort to reduce routing failures, because in many
// cases the channel may have been used to route very small values (either by honest users
// or as a DoS).
1472 self.channel_value_satoshis * 1000 * 9 / 10,
1474 self.counterparty_max_htlc_value_in_flight_msat
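// Illustrative arithmetic: on a 1_000_000 sat channel the capacity bound above is
// 900_000_000 msat (90% of capacity); the announced maximum is the lesser of that and
// the counterparty's max-HTLC-in-flight value.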
1478 /// Allowed in any state (including after shutdown)
1479 pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
1480 self.counterparty_htlc_minimum_msat
/// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
1484 pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
1485 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
1488 fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
1489 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
1490 let holder_reserve = self.holder_selected_channel_reserve_satoshis;
1492 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
1493 party_max_htlc_value_in_flight_msat
1498 pub fn get_value_satoshis(&self) -> u64 {
1499 self.channel_value_satoshis
1502 pub fn get_fee_proportional_millionths(&self) -> u32 {
1503 self.config.options.forwarding_fee_proportional_millionths
1506 pub fn get_cltv_expiry_delta(&self) -> u16 {
1507 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
1510 pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
1511 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
1512 where F::Target: FeeEstimator
1514 match self.config.options.max_dust_htlc_exposure {
1515 MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
1516 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
1517 ConfirmationTarget::OnChainSweep) as u64;
1518 feerate_per_kw.saturating_mul(multiplier)
1520 MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
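// Illustrative arithmetic (the multiplier value here is hypothetical): with
// `FeeRateMultiplier(10_000)` and a 253 sat/kWU `OnChainSweep` estimate, the dust
// exposure limit is 253 * 10_000 = 2_530_000 msat.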
1524 /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1525 pub fn prev_config(&self) -> Option<ChannelConfig> {
1526 self.prev_config.map(|prev_config| prev_config.0)
1529 // Checks whether we should emit a `ChannelPending` event.
1530 pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
1531 self.is_funding_broadcast() && !self.channel_pending_event_emitted
1534 // Returns whether we already emitted a `ChannelPending` event.
1535 pub(crate) fn channel_pending_event_emitted(&self) -> bool {
1536 self.channel_pending_event_emitted
1539 // Remembers that we already emitted a `ChannelPending` event.
1540 pub(crate) fn set_channel_pending_event_emitted(&mut self) {
1541 self.channel_pending_event_emitted = true;
1544 // Checks whether we should emit a `ChannelReady` event.
1545 pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
1546 self.is_usable() && !self.channel_ready_event_emitted
1549 // Remembers that we already emitted a `ChannelReady` event.
1550 pub(crate) fn set_channel_ready_event_emitted(&mut self) {
1551 self.channel_ready_event_emitted = true;
1554 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1555 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1556 /// no longer be considered when forwarding HTLCs.
1557 pub fn maybe_expire_prev_config(&mut self) {
1558 if self.prev_config.is_none() {
1561 let prev_config = self.prev_config.as_mut().unwrap();
1563 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1564 self.prev_config = None;
1568 /// Returns the current [`ChannelConfig`] applied to the channel.
1569 pub fn config(&self) -> ChannelConfig {
/// Updates the channel's config. A bool is returned indicating whether the config update
/// resulted in a new ChannelUpdate message.
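///
/// A minimal usage sketch (illustrative; assumes `context` is a mutable `ChannelContext`):
///
/// ```ignore
/// let mut opts = context.config();
/// opts.forwarding_fee_base_msat += 100;
/// if context.update_config(&opts) {
///     // Relay policy changed; a fresh `channel_update` should be broadcast.
/// }
/// ```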
1575 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1576 let did_channel_update =
1577 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1578 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1579 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1580 if did_channel_update {
1581 self.prev_config = Some((self.config.options, 0));
1582 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1583 // policy change to propagate throughout the network.
1584 self.update_time_counter += 1;
1586 self.config.options = *config;
1590 /// Returns true if funding_signed was sent/received and the
1591 /// funding transaction has been broadcast if necessary.
1592 pub fn is_funding_broadcast(&self) -> bool {
1593 !self.channel_state.is_pre_funded_state() &&
1594 !matches!(self.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH))
1597 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1598 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1599 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1600 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
/// such, a transaction is generally the result of b increasing the amount paid to a (or adding
/// an HTLC to it).
/// @local is used only to convert relevant internal structures which refer to remote vs local
/// in order to decide the value of outputs and the direction of HTLCs.
1605 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1606 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1607 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1608 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1609 /// which peer generated this transaction and "to whom" this transaction flows.
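///
/// For example (illustrative): when validating a counterparty's `commitment_signed` we build
/// with `local = true, generated_by_local = false`, whereas building the transaction for our
/// own outbound `commitment_signed` uses `local = false, generated_by_local = true`.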
1611 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1612 where L::Target: Logger
1614 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1615 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1616 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
1618 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1619 let mut remote_htlc_total_msat = 0;
1620 let mut local_htlc_total_msat = 0;
1621 let mut value_to_self_msat_offset = 0;
1623 let mut feerate_per_kw = self.feerate_per_kw;
1624 if let Some((feerate, update_state)) = self.pending_update_fee {
1625 if match update_state {
1626 // Note that these match the inclusion criteria when scanning
1627 // pending_inbound_htlcs below.
1628 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1629 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1630 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
1632 feerate_per_kw = feerate;
1636 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1637 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
1638 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
1640 if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
1642 macro_rules! get_htlc_in_commitment {
1643 ($htlc: expr, $offered: expr) => {
1644 HTLCOutputInCommitment {
1646 amount_msat: $htlc.amount_msat,
1647 cltv_expiry: $htlc.cltv_expiry,
1648 payment_hash: $htlc.payment_hash,
1649 transaction_output_index: None
1654 macro_rules! add_htlc_output {
1655 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1656 if $outbound == local { // "offered HTLC output"
1657 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
1658 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1661 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
1663 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1664 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1665 included_non_dust_htlcs.push((htlc_in_tx, $source));
1667 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1668 included_dust_htlcs.push((htlc_in_tx, $source));
1671 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
1672 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1675 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
1677 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1678 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1679 included_non_dust_htlcs.push((htlc_in_tx, $source));
1681 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1682 included_dust_htlcs.push((htlc_in_tx, $source));
1688 let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1690 for ref htlc in self.pending_inbound_htlcs.iter() {
1691 let (include, state_name) = match htlc.state {
1692 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1693 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1694 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1695 InboundHTLCState::Committed => (true, "Committed"),
1696 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1700 add_htlc_output!(htlc, false, None, state_name);
1701 remote_htlc_total_msat += htlc.amount_msat;
1703 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1705 &InboundHTLCState::LocalRemoved(ref reason) => {
1706 if generated_by_local {
1707 if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
1708 inbound_htlc_preimages.push(preimage);
1709 value_to_self_msat_offset += htlc.amount_msat as i64;
1719 let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1721 for ref htlc in self.pending_outbound_htlcs.iter() {
1722 let (include, state_name) = match htlc.state {
1723 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1724 OutboundHTLCState::Committed => (true, "Committed"),
1725 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1726 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1727 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1730 let preimage_opt = match htlc.state {
1731 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1732 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1733 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1737 if let Some(preimage) = preimage_opt {
1738 outbound_htlc_preimages.push(preimage);
1742 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1743 local_htlc_total_msat += htlc.amount_msat;
1745 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1747 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1748 value_to_self_msat_offset -= htlc.amount_msat as i64;
1750 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1751 if !generated_by_local {
1752 value_to_self_msat_offset -= htlc.amount_msat as i64;
1760 let value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1761 assert!(value_to_self_msat >= 0);
1762 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
1763 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
1764 // "violate" their reserve value by couting those against it. Thus, we have to convert
1765 // everything to i64 before subtracting as otherwise we can overflow.
1766 let value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
1767 assert!(value_to_remote_msat >= 0);
1769 #[cfg(debug_assertions)]
1771 // Make sure that the to_self/to_remote is always either past the appropriate
1772 // channel_reserve *or* it is making progress towards it.
1773 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
1774 self.holder_max_commitment_tx_output.lock().unwrap()
1776 self.counterparty_max_commitment_tx_output.lock().unwrap()
1778 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
1779 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
1780 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
1781 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
1784 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
1785 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
1786 let (value_to_self, value_to_remote) = if self.is_outbound() {
1787 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
1789 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
1792 let mut value_to_a = if local { value_to_self } else { value_to_remote };
1793 let mut value_to_b = if local { value_to_remote } else { value_to_self };
1794 let (funding_pubkey_a, funding_pubkey_b) = if local {
1795 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
1797 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
1800 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
1801 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
1806 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
1807 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
1812 let num_nondust_htlcs = included_non_dust_htlcs.len();
1814 let channel_parameters =
1815 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
1816 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
1817 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
1824 &mut included_non_dust_htlcs,
1827 let mut htlcs_included = included_non_dust_htlcs;
1828 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
1829 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
1830 htlcs_included.append(&mut included_dust_htlcs);
1838 local_balance_msat: value_to_self_msat as u64,
1839 remote_balance_msat: value_to_remote_msat as u64,
1840 inbound_htlc_preimages,
1841 outbound_htlc_preimages,
1846 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1847 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1848 /// our counterparty!)
1849 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
/// TODO: Find some way to check this at compile time?
1851 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
1852 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
1853 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1854 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1855 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1857 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
/// Creates a set of keys for build_commitment_transaction to generate a transaction which we
/// will sign and send to our counterparty.
1864 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1865 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1866 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1867 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1869 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1872 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
1873 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1874 /// Panics if called before accept_channel/InboundV1Channel::new
1875 pub fn get_funding_redeemscript(&self) -> ScriptBuf {
1876 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
1879 fn counterparty_funding_pubkey(&self) -> &PublicKey {
1880 &self.get_counterparty_pubkeys().funding_pubkey
1883 pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
1887 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1888 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
// may, at any point, increase by at least 10 sat/vB (i.e. 2530 sat/kWU) or 25%,
1890 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1891 // more dust balance if the feerate increases when we have several HTLCs pending
1892 // which are near the dust limit.
1893 let mut feerate_per_kw = self.feerate_per_kw;
1894 // If there's a pending update fee, use it to ensure we aren't under-estimating
1895 // potential feerate updates coming soon.
1896 if let Some((feerate, _)) = self.pending_update_fee {
1897 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1899 if let Some(feerate) = outbound_feerate_update {
1900 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1902 let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
1903 cmp::max(2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
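// Illustrative arithmetic: at 1_000 sat/kWU the buffer feerate is max(2530, 1_250) = 2_530,
// while at 20_000 sat/kWU it is max(2530, 25_000) = 25_000 sat/kWU.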
1906 /// Get forwarding information for the counterparty.
1907 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
1908 self.counterparty_forwarding_info.clone()
/// Returns an HTLCStats about inbound pending HTLCs
1912 fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
let context = &self;
let mut stats = HTLCStats {
1915 pending_htlcs: context.pending_inbound_htlcs.len() as u32,
1916 pending_htlcs_value_msat: 0,
1917 on_counterparty_tx_dust_exposure_msat: 0,
1918 on_holder_tx_dust_exposure_msat: 0,
1919 holding_cell_msat: 0,
1920 on_holder_tx_holding_cell_htlcs_count: 0,
1923 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1926 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1927 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1928 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1930 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1931 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1932 for ref htlc in context.pending_inbound_htlcs.iter() {
1933 stats.pending_htlcs_value_msat += htlc.amount_msat;
1934 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
1935 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1937 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
1938 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
/// Returns an HTLCStats about pending outbound HTLCs, *including* pending adds in our holding cell.
1945 fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
let context = &self;
let mut stats = HTLCStats {
1948 pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1949 pending_htlcs_value_msat: 0,
1950 on_counterparty_tx_dust_exposure_msat: 0,
1951 on_holder_tx_dust_exposure_msat: 0,
1952 holding_cell_msat: 0,
1953 on_holder_tx_holding_cell_htlcs_count: 0,
1956 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1959 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1960 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1961 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1963 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1964 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1965 for ref htlc in context.pending_outbound_htlcs.iter() {
1966 stats.pending_htlcs_value_msat += htlc.amount_msat;
1967 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1968 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1970 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1971 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1975 for update in context.holding_cell_htlc_updates.iter() {
1976 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1977 stats.pending_htlcs += 1;
1978 stats.pending_htlcs_value_msat += amount_msat;
1979 stats.holding_cell_msat += amount_msat;
1980 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1981 stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1983 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1984 stats.on_holder_tx_dust_exposure_msat += amount_msat;
1986 stats.on_holder_tx_holding_cell_htlcs_count += 1;
1993 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
1994 /// Doesn't bother handling the
1995 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
1996 /// corner case properly.
1997 pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
1998 -> AvailableBalances
1999 where F::Target: FeeEstimator
2001 let context = &self;
2002 // Note that we have to handle overflow due to the above case.
2003 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
2004 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
2006 let mut balance_msat = context.value_to_self_msat;
2007 for ref htlc in context.pending_inbound_htlcs.iter() {
2008 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
2009 balance_msat += htlc.amount_msat;
2012 balance_msat -= outbound_stats.pending_htlcs_value_msat;
2014 let outbound_capacity_msat = context.value_to_self_msat
2015 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
2017 context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
2019 let mut available_capacity_msat = outbound_capacity_msat;
2021 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2022 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
2026 if context.is_outbound() {
// We must account for the channel's commitment tx fee when computing how much of the
// available capacity can be used for the next HTLC. This mirrors the logic in send_htlc.
2030 // The fee depends on whether the amount we will be sending is above dust or not,
// and the answer will in turn change the amount itself, making it a circular
// dependency.
2033 // This complicates the computation around dust-values, up to the one-htlc-value.
2034 let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
2035 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2036 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
2039 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
2040 let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
2041 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
2042 let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
2043 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2044 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2045 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2048 // We will first subtract the fee as if we were above-dust. Then, if the resulting
2049 // value ends up being below dust, we have this fee available again. In that case,
2050 // match the value to right-below-dust.
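// Illustrative: if subtracting the above-dust fee reserve leaves, say, 400_000 msat while
// the real dust limit is 500 sat, we add the one-HTLC fee difference back and then cap the
// limit right below dust (499_999 msat), since a dust HTLC doesn't pay that extra fee.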
2051 let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
2052 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
2053 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
2054 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
2055 debug_assert!(one_htlc_difference_msat != 0);
2056 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
2057 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
2058 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
2060 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
2063 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
2064 // sending a new HTLC won't reduce their balance below our reserve threshold.
2065 let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
2066 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2067 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
2070 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
2071 let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
2073 let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
2074 let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
2075 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
2077 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
2078 // If another HTLC's fee would reduce the remote's balance below the reserve limit
2079 // we've selected for them, we can only send dust HTLCs.
2080 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
2084 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
2086 // If we get close to our maximum dust exposure, we end up in a situation where we can send
// between zero and the remaining dust exposure limit, OR above the dust limit.
2088 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
2089 // send above the dust limit (as the router can always overpay to meet the dust limit).
2090 let mut remaining_msat_below_dust_exposure_limit = None;
2091 let mut dust_exposure_dust_limit_msat = 0;
2092 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
2094 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2095 (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
2097 let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
2098 (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2099 context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2101 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
2102 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2103 remaining_msat_below_dust_exposure_limit =
2104 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
2105 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
2108 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
2109 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2110 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
2111 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
2112 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
2113 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
2116 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
2117 if available_capacity_msat < dust_exposure_dust_limit_msat {
2118 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
2120 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
2124 available_capacity_msat = cmp::min(available_capacity_msat,
2125 context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
2127 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
2128 available_capacity_msat = 0;
2132 inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
2133 - context.value_to_self_msat as i64
2134 - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
2135 - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
2137 outbound_capacity_msat,
2138 next_outbound_htlc_limit_msat: available_capacity_msat,
2139 next_outbound_htlc_minimum_msat,
2144 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
2145 let context = &self;
2146 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
2149 /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
2150 /// number of pending HTLCs that are on track to be in our next commitment tx.
2152 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2153 /// `fee_spike_buffer_htlc` is `Some`.
2155 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2156 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2158 /// Dust HTLCs are excluded.
2159 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2160 let context = &self;
2161 assert!(context.is_outbound());
2163 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2166 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2167 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2169 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
2170 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
2172 let mut addl_htlcs = 0;
2173 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2175 HTLCInitiator::LocalOffered => {
2176 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2180 HTLCInitiator::RemoteOffered => {
2181 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2187 let mut included_htlcs = 0;
2188 for ref htlc in context.pending_inbound_htlcs.iter() {
2189 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
2192 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
2193 // transaction including this HTLC if it times out before they RAA.
2194 included_htlcs += 1;
2197 for ref htlc in context.pending_outbound_htlcs.iter() {
2198 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
2202 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
2203 OutboundHTLCState::Committed => included_htlcs += 1,
2204 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2205 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
2206 // transaction won't be generated until they send us their next RAA, which will mean
2207 // dropping any HTLCs in this state.
2212 for htlc in context.holding_cell_htlc_updates.iter() {
2214 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
2215 if amount_msat / 1000 < real_dust_limit_timeout_sat {
2220 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
2221 // ack we're guaranteed to never include them in commitment txs anymore.
2225 let num_htlcs = included_htlcs + addl_htlcs;
2226 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2227 #[cfg(any(test, fuzzing))]
2230 if fee_spike_buffer_htlc.is_some() {
2231 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2233 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
2234 + context.holding_cell_htlc_updates.len();
2235 let commitment_tx_info = CommitmentTxInfoCached {
2237 total_pending_htlcs,
2238 next_holder_htlc_id: match htlc.origin {
2239 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2240 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2242 next_counterparty_htlc_id: match htlc.origin {
2243 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2244 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2246 feerate: context.feerate_per_kw,
2248 *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2253 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
2254 /// pending HTLCs that are on track to be in their next commitment tx
2256 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2257 /// `fee_spike_buffer_htlc` is `Some`.
2259 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2260 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2262 /// Dust HTLCs are excluded.
2263 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2264 let context = &self;
2265 assert!(!context.is_outbound());
2267 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2270 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2271 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2273 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
2274 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
2276 let mut addl_htlcs = 0;
2277 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2279 HTLCInitiator::LocalOffered => {
2280 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2284 HTLCInitiator::RemoteOffered => {
2285 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2291 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
// non-dust inbound HTLCs are included (as all states imply they will be included) and only
// committed outbound HTLCs are counted; see below.
2294 let mut included_htlcs = 0;
2295 for ref htlc in context.pending_inbound_htlcs.iter() {
2296 if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
2299 included_htlcs += 1;
2302 for ref htlc in context.pending_outbound_htlcs.iter() {
2303 if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
2306 // We only include outbound HTLCs if it will not be included in their next commitment_signed,
2307 // i.e. if they've responded to us with an RAA after announcement.
2309 OutboundHTLCState::Committed => included_htlcs += 1,
2310 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2311 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
2316 let num_htlcs = included_htlcs + addl_htlcs;
2317 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2318 #[cfg(any(test, fuzzing))]
2321 if fee_spike_buffer_htlc.is_some() {
2322 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2324 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
2325 let commitment_tx_info = CommitmentTxInfoCached {
2327 total_pending_htlcs,
2328 next_holder_htlc_id: match htlc.origin {
2329 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2330 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2332 next_counterparty_htlc_id: match htlc.origin {
2333 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2334 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2336 feerate: context.feerate_per_kw,
2338 *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2343 fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O> where F: Fn() -> Option<O> {
2344 match self.channel_state {
2345 ChannelState::FundingNegotiated => f(),
2346 ChannelState::AwaitingChannelReady(flags) =>
2347 if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) ||
2348 flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into())
/// Returns the transaction if there is a pending funding transaction that is yet to be broadcast.
2360 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
2361 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
/// Returns the transaction ID if there is a pending funding transaction that is yet to be broadcast.
2366 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
2367 self.if_unbroadcasted_funding(||
2368 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2372 /// Returns whether the channel is funded in a batch.
2373 pub fn is_batch_funding(&self) -> bool {
2374 self.is_batch_funding.is_some()
/// Returns the transaction ID if there is a pending batch funding transaction that is yet to be broadcast.
2379 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
2380 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2383 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
2384 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
/// those explicitly stated to be allowed after shutdown completes, e.g. some simple getters).
/// Also returns the list of payment_hashes for HTLCs which we can safely fail backwards
2387 /// immediately (others we will have to allow to time out).
2388 pub fn force_shutdown(&mut self, should_broadcast: bool, closure_reason: ClosureReason) -> ShutdownResult {
2389 // Note that we MUST only generate a monitor update that indicates force-closure - we're
2390 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
// being fully configured in some cases. Thus, it's likely any monitor events we generate will
2392 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
2393 assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete));
2395 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
2396 // return them to fail the payment.
2397 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
2398 let counterparty_node_id = self.get_counterparty_node_id();
2399 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
2401 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
2402 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
2407 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
2408 // If we haven't yet exchanged funding signatures (ie channel_state < AwaitingChannelReady),
2409 // returning a channel monitor update here would imply a channel monitor update before
2410 // we even registered the channel monitor to begin with, which is invalid.
2411 // Thus, if we aren't actually at a point where we could conceivably broadcast the
2412 // funding transaction, don't return a funding txo (which prevents providing the
2413 // monitor update to the user, even if we return one).
2414 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
2415 if !self.channel_state.is_pre_funded_state() {
2416 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
2417 Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
2418 update_id: self.latest_monitor_update_id,
2419 counterparty_node_id: Some(self.counterparty_node_id),
2420 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
2424 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
2425 let unbroadcasted_funding_tx = self.unbroadcasted_funding();
2427 self.channel_state = ChannelState::ShutdownComplete;
2428 self.update_time_counter += 1;
2432 dropped_outbound_htlcs,
2433 unbroadcasted_batch_funding_txid,
2434 channel_id: self.channel_id,
2435 user_channel_id: self.user_id,
2436 channel_capacity_satoshis: self.channel_value_satoshis,
2437 counterparty_node_id: self.counterparty_node_id,
2438 unbroadcasted_funding_tx,
2442 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2443 fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
2444 let counterparty_keys = self.build_remote_transaction_keys();
2445 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
2447 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2448 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2449 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2450 &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2452 match &self.holder_signer {
2453 // TODO (arik): move match into calling method for Taproot
2454 ChannelSignerType::Ecdsa(ecdsa) => {
2455 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
2456 .map(|(signature, _)| msgs::FundingSigned {
2457 channel_id: self.channel_id(),
2460 partial_signature_with_nonce: None,
2464 if funding_signed.is_none() {
2465 #[cfg(not(async_signing))] {
2466 panic!("Failed to get signature for funding_signed");
2468 #[cfg(async_signing)] {
2469 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
2470 self.signer_pending_funding = true;
2472 } else if self.signer_pending_funding {
2473 log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
2474 self.signer_pending_funding = false;
// We sign the "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
2478 (counterparty_initial_commitment_tx, funding_signed)
2480 // TODO (taproot|arik)
2487 // Internal utility functions for channels
2489 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2490 /// `channel_value_satoshis` in msat, set through
2491 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2493 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
2495 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
2496 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
2497 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
2499 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
2502 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
2504 channel_value_satoshis * 10 * configured_percent
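// Illustrative arithmetic: `* 10 * percent` equals `* 1000 * percent / 100`, i.e. a
// percentage of the channel value expressed in msat; a 1_000_000 sat channel configured
// at 10% yields 1_000_000 * 10 * 10 = 100_000_000 msat.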
2507 /// Returns a minimum channel reserve value the remote needs to maintain,
2508 /// required by us according to the configured or default
2509 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
/// Guaranteed to return a value no larger than `channel_value_satoshis`.
///
/// This is used for both outbound and inbound channels, and has a lower bound of
/// `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
2515 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2516 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
2517 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
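// Illustrative arithmetic: with `their_channel_reserve_proportional_millionths = 10_000`
// (i.e. 1%), a 1_000_000 sat channel yields a 10_000 sat reserve; tiny channels are floored
// at `MIN_THEIR_CHAN_RESERVE_SATOSHIS` and the result never exceeds the channel value.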
2520 /// This is for legacy reasons, present for forward-compatibility.
/// LDK versions older than 0.0.104 don't know how to read/handle values other than the default
2522 /// from storage. Hence, we use this function to not persist default values of
2523 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
2524 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
2525 let (q, _) = channel_value_satoshis.overflowing_div(100);
2526 cmp::min(channel_value_satoshis, cmp::max(q, 1000))
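// Illustrative arithmetic: a 50_000 sat channel yields max(500, 1000) = 1_000 sat, while a
// 1_000_000 sat channel yields the full 1%, i.e. 10_000 sat.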
2529 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2530 // Note that num_htlcs should not include dust HTLCs.
2532 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2533 feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
2536 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2537 // Note that num_htlcs should not include dust HTLCs.
2538 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2539 // Note that we need to divide before multiplying to round properly,
2540 // since the lowest denomination of bitcoin on-chain is the satoshi.
2541 (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
2542 }
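// Worked example (test-only sketch, relying on this file's non-anchor constants of 724
// weight for the base commitment tx and 172 weight per non-dust HTLC): at 1000 sat/kW with
// two HTLCs the fee is (724 + 2 * 172) * 1000 / 1000 = 1068 sats, i.e. 1_068_000 msat after
// the divide-then-multiply rounding above.
#[cfg(test)]
mod commit_tx_fee_example {
use super::*;
#[test]
fn msat_fee_is_a_whole_number_of_sats() {
let features = ChannelTypeFeatures::only_static_remote_key();
assert_eq!(commit_tx_fee_sat(1000, 2, &features), 1068);
assert_eq!(commit_tx_fee_msat(1000, 2, &features), 1_068_000);
}
}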
2544 // Holder designates channel data owned for the benefit of the user client.
2545 // Counterparty designates channel data owned by the other channel participant entity.
2546 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
2547 pub context: ChannelContext<SP>,
2548 }
2550 #[cfg(any(test, fuzzing))]
2551 struct CommitmentTxInfoCached {
2552 fee: u64,
2553 total_pending_htlcs: usize,
2554 next_holder_htlc_id: u64,
2555 next_counterparty_htlc_id: u64,
2556 feerate: u32,
2557 }
2559 /// Contents of a wire message that fails an HTLC backwards. Useful for [`Channel::fail_htlc`] to
2560 /// fail with either [`msgs::UpdateFailMalformedHTLC`] or [`msgs::UpdateFailHTLC`] as needed.
2561 trait FailHTLCContents {
2562 type Message: FailHTLCMessageName;
2563 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message;
2564 fn to_inbound_htlc_state(self) -> InboundHTLCState;
2565 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK;
2566 }
2567 impl FailHTLCContents for msgs::OnionErrorPacket {
2568 type Message = msgs::UpdateFailHTLC;
2569 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
2570 msgs::UpdateFailHTLC { htlc_id, channel_id, reason: self }
2571 }
2572 fn to_inbound_htlc_state(self) -> InboundHTLCState {
2573 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(self))
2574 }
2575 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
2576 HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet: self }
2577 }
2578 }
2579 impl FailHTLCContents for ([u8; 32], u16) {
2580 type Message = msgs::UpdateFailMalformedHTLC;
2581 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
2582 msgs::UpdateFailMalformedHTLC {
2583 htlc_id,
2584 channel_id,
2585 sha256_of_onion: self.0,
2586 failure_code: self.1
2587 }
2588 }
2589 fn to_inbound_htlc_state(self) -> InboundHTLCState {
2590 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(self))
2591 }
2592 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
2593 HTLCUpdateAwaitingACK::FailMalformedHTLC {
2594 htlc_id,
2595 sha256_of_onion: self.0,
2596 failure_code: self.1
2597 }
2598 }
2599 }
2601 trait FailHTLCMessageName {
2602 fn name() -> &'static str;
2603 }
2604 impl FailHTLCMessageName for msgs::UpdateFailHTLC {
2605 fn name() -> &'static str {
2606 "update_fail_htlc"
2607 }
2608 }
2609 impl FailHTLCMessageName for msgs::UpdateFailMalformedHTLC {
2610 fn name() -> &'static str {
2611 "update_fail_malformed_htlc"
2612 }
2613 }
2615 impl<SP: Deref> Channel<SP> where
2616 SP::Target: SignerProvider,
2617 <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
2618 {
2619 fn check_remote_fee<F: Deref, L: Deref>(
2620 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
2621 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
2622 ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
2623 {
2624 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
2625 ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
2626 } else {
2627 ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
2628 };
2629 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
2630 if feerate_per_kw < lower_limit {
2631 if let Some(cur_feerate) = cur_feerate_per_kw {
2632 if feerate_per_kw > cur_feerate {
2633 log_warn!(logger,
2634 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
2635 cur_feerate, feerate_per_kw);
2636 return Ok(());
2637 }
2638 }
2639 return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
2640 }
2641 Ok(())
2642 }
2645 fn get_closing_scriptpubkey(&self) -> ScriptBuf {
2646 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
2647 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
2648 // outside of those situations will panic on the `unwrap` below.
2649 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
2650 }
2653 fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
2654 let mut ret =
2655 (4 + // version
2656 1 + // input count
2657 36 + // prevout
2658 1 + // script length (0)
2659 4 + // sequence
2660 4 + // lock time
2661 1 + // output count
2662 )*4 + // * 4 for non-witness parts
2663 2 + // witness marker and flag
2664 1 + // witness element count
2665 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
2666 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
2667 2*(1 + 71); // two signatures + sighash type flags
2668 if let Some(spk) = a_scriptpubkey {
2669 ret += ((8+1) + // output values and script length
2670 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
2671 }
2672 if let Some(spk) = b_scriptpubkey {
2673 ret += ((8+1) + // output values and script length
2674 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
2675 }
2677 ret
2678 }
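// Worked example (sketch, assuming the standard 71-byte 2-of-2 funding redeemscript): the
// non-witness part above is (4 + 1 + 36 + 1 + 4 + 4 + 1) * 4 = 204 weight, the witness part
// is 2 + 1 + 4 + 71 + 2*(1 + 71) = 222 weight, and each P2WPKH shutdown output then adds
// (8 + 1 + 22) * 4 = 124 weight.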
2680 fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
2681 assert!(self.context.pending_inbound_htlcs.is_empty());
2682 assert!(self.context.pending_outbound_htlcs.is_empty());
2683 assert!(self.context.pending_update_fee.is_none());
2685 let mut total_fee_satoshis = proposed_total_fee_satoshis;
2686 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
2687 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
2689 if value_to_holder < 0 {
2690 assert!(self.context.is_outbound());
2691 total_fee_satoshis += (-value_to_holder) as u64;
2692 } else if value_to_counterparty < 0 {
2693 assert!(!self.context.is_outbound());
2694 total_fee_satoshis += (-value_to_counterparty) as u64;
2695 }
2697 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
2698 value_to_counterparty = 0;
2699 }
2701 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
2702 value_to_holder = 0;
2703 }
2705 assert!(self.context.shutdown_scriptpubkey.is_some());
2706 let holder_shutdown_script = self.get_closing_scriptpubkey();
2707 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
2708 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
2710 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
2711 (closing_transaction, total_fee_satoshis)
2712 }
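// Worked example (sketch): on a 100_000 sat channel where we hold 60_000 sats and are the
// funder, proposed_total_fee_satoshis = 1_000 yields value_to_holder = 59_000 and
// value_to_counterparty = 40_000; any output at or below the dust limit (or the remote
// output when skip_remote_output is set) is pruned to zero rather than created.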
2714 fn funding_outpoint(&self) -> OutPoint {
2715 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
2716 }
2718 /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
2719 /// entirely.
2720 ///
2721 /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
2722 /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
2723 ///
2724 /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
2725 /// disconnected).
2726 pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
2727 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
2728 where L::Target: Logger {
2729 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
2730 // (see equivalent if condition there).
2731 assert!(!self.context.channel_state.can_generate_new_commitment());
2732 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
2733 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
2734 self.context.latest_monitor_update_id = mon_update_id;
2735 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
2736 assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
2737 }
2738 }
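// Usage note (sketch): a caller invokes this only while the peer is disconnected and then
// hands the preimage to the ChannelMonitor out-of-band, so the claim remains durable even
// though the ChannelMonitorUpdate produced internally is thrown away here.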
2740 fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
2741 // Either ChannelReady got set (which means it won't be unset) or there is no way any
2742 // caller thought we could have something claimed (because we wouldn't have accepted an
2743 // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us
2744 // at all, so panicking here is the correct behavior rather than returning an error.
2745 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
2746 panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
2747 }
2749 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2750 // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
2751 // these, but for now we just have to treat them as normal.
2753 let mut pending_idx = core::usize::MAX;
2754 let mut htlc_value_msat = 0;
2755 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2756 if htlc.htlc_id == htlc_id_arg {
2757 debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
2758 log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
2759 htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
2760 match htlc.state {
2761 InboundHTLCState::Committed => {},
2762 InboundHTLCState::LocalRemoved(ref reason) => {
2763 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2764 } else {
2765 log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
2766 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2767 }
2768 return UpdateFulfillFetch::DuplicateClaim {};
2769 },
2770 _ => {
2771 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2772 // Don't return in release mode here so that we can update channel_monitor
2773 }
2774 }
2775 pending_idx = idx;
2776 htlc_value_msat = htlc.amount_msat;
2777 break;
2778 }
2779 }
2780 if pending_idx == core::usize::MAX {
2781 #[cfg(any(test, fuzzing))]
2782 // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
2783 // this is simply a duplicate claim, not previously failed and we lost funds.
2784 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2785 return UpdateFulfillFetch::DuplicateClaim {};
2786 }
2788 // Now update local state:
2790 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
2791 // can claim it even if the channel hits the chain before we see their next commitment.
2792 self.context.latest_monitor_update_id += 1;
2793 let monitor_update = ChannelMonitorUpdate {
2794 update_id: self.context.latest_monitor_update_id,
2795 counterparty_node_id: Some(self.context.counterparty_node_id),
2796 updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
2797 payment_preimage: payment_preimage_arg.clone(),
2798 }],
2799 };
2801 if !self.context.channel_state.can_generate_new_commitment() {
2802 // Note that this condition is the same as the assertion in
2803 // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
2804 // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
2805 // do not get into this branch.
2806 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2807 match pending_update {
2808 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2809 if htlc_id_arg == htlc_id {
2810 // Make sure we don't leave latest_monitor_update_id incremented here:
2811 self.context.latest_monitor_update_id -= 1;
2812 #[cfg(any(test, fuzzing))]
2813 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2814 return UpdateFulfillFetch::DuplicateClaim {};
2815 }
2816 },
2817 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
2818 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
2819 {
2820 if htlc_id_arg == htlc_id {
2821 log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
2822 // TODO: We may actually be able to switch to a fulfill here, though it's
2823 // rare enough it may not be worth the complexity burden.
2824 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2825 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2826 }
2827 },
2828 _ => {}
2829 }
2830 }
2831 log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32());
2832 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
2833 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
2834 });
2835 #[cfg(any(test, fuzzing))]
2836 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2837 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2838 }
2839 #[cfg(any(test, fuzzing))]
2840 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2842 {
2843 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2844 if let InboundHTLCState::Committed = htlc.state {
2845 } else {
2846 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2847 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2848 }
2849 log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
2850 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
2851 }
2853 UpdateFulfillFetch::NewClaim {
2854 monitor_update,
2855 htlc_value_msat,
2856 msg: Some(msgs::UpdateFulfillHTLC {
2857 channel_id: self.context.channel_id(),
2858 htlc_id: htlc_id_arg,
2859 payment_preimage: payment_preimage_arg,
2860 }),
2861 }
2862 }
2864 pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
2865 let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
2866 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
2867 UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
2868 // Even if we aren't supposed to let new monitor updates with commitment state
2869 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
2870 // matter what. Sadly, to push a new monitor update which flies before others
2871 // already queued, we have to insert it into the pending queue and update the
2872 // update_ids of all the following monitors.
2873 if release_cs_monitor && msg.is_some() {
2874 let mut additional_update = self.build_commitment_no_status_check(logger);
2875 // build_commitment_no_status_check may bump latest_monitor_id but we want them
2876 // to be strictly increasing by one, so decrement it here.
2877 self.context.latest_monitor_update_id = monitor_update.update_id;
2878 monitor_update.updates.append(&mut additional_update.updates);
2879 } else {
2880 let new_mon_id = self.context.blocked_monitor_updates.get(0)
2881 .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
2882 monitor_update.update_id = new_mon_id;
2883 for held_update in self.context.blocked_monitor_updates.iter_mut() {
2884 held_update.update.update_id += 1;
2885 }
2886 if msg.is_some() {
2887 debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
2888 let update = self.build_commitment_no_status_check(logger);
2889 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
2890 update,
2891 });
2892 }
2893 }
2895 self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
2896 UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
2897 },
2898 UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
2899 }
2900 }
2902 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2903 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2904 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2905 /// before we fail backwards.
2907 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2908 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2909 /// [`ChannelError::Ignore`].
2910 pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
2911 -> Result<(), ChannelError> where L::Target: Logger {
2912 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
2913 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2914 }
2916 /// Used for failing back with [`msgs::UpdateFailMalformedHTLC`]. For now, this is used when we
2917 /// want to fail blinded HTLCs where we are not the intro node.
2919 /// See [`Self::queue_fail_htlc`] for more info.
2920 pub fn queue_fail_malformed_htlc<L: Deref>(
2921 &mut self, htlc_id_arg: u64, failure_code: u16, sha256_of_onion: [u8; 32], logger: &L
2922 ) -> Result<(), ChannelError> where L::Target: Logger {
2923 self.fail_htlc(htlc_id_arg, (sha256_of_onion, failure_code), true, logger)
2924 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2925 }
2927 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2928 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2929 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2930 /// before we fail backwards.
2932 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2933 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2934 /// [`ChannelError::Ignore`].
2935 fn fail_htlc<L: Deref, E: FailHTLCContents + Clone>(
2936 &mut self, htlc_id_arg: u64, err_contents: E, mut force_holding_cell: bool,
2937 logger: &L
2938 ) -> Result<Option<E::Message>, ChannelError> where L::Target: Logger {
2939 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
2940 panic!("Was asked to fail an HTLC when channel was not in an operational state");
2943 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2944 // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
2945 // these, but for now we just have to treat them as normal.
2947 let mut pending_idx = core::usize::MAX;
2948 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2949 if htlc.htlc_id == htlc_id_arg {
2950 match htlc.state {
2951 InboundHTLCState::Committed => {},
2952 InboundHTLCState::LocalRemoved(ref reason) => {
2953 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2954 } else {
2955 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2956 }
2957 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
2958 },
2959 _ => {
2960 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2961 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
2962 }
2963 }
2964 pending_idx = idx;
2965 }
2966 }
2967 if pending_idx == core::usize::MAX {
2968 #[cfg(any(test, fuzzing))]
2969 // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
2970 // is simply a duplicate fail, not previously failed and we failed-back too early.
2971 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2972 return Ok(None);
2973 }
2975 if !self.context.channel_state.can_generate_new_commitment() {
2976 debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
2977 force_holding_cell = true;
2978 }
2980 // Now update local state:
2981 if force_holding_cell {
2982 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2983 match pending_update {
2984 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2985 if htlc_id_arg == htlc_id {
2986 #[cfg(any(test, fuzzing))]
2987 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2988 return Ok(None);
2989 }
2990 },
2991 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
2992 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
2994 if htlc_id_arg == htlc_id {
2995 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2996 return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
3002 log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
3003 self.context.holding_cell_htlc_updates.push(err_contents.to_htlc_update_awaiting_ack(htlc_id_arg));
3004 return Ok(None);
3005 }
3007 log_trace!(logger, "Failing HTLC ID {} back with {} message in channel {}.", htlc_id_arg,
3008 E::Message::name(), &self.context.channel_id());
3009 {
3010 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
3011 htlc.state = err_contents.clone().to_inbound_htlc_state();
3012 }
3014 Ok(Some(err_contents.to_message(htlc_id_arg, self.context.channel_id())))
3015 }
3017 // Message handlers:
3018 /// Updates the state of the channel to indicate that all channels in the batch have received
3019 /// funding_signed and persisted their monitors.
3020 /// The funding transaction is consequently allowed to be broadcast, and the channel can be
3021 /// treated as a non-batch channel going forward.
3022 pub fn set_batch_ready(&mut self) {
3023 self.context.is_batch_funding = None;
3024 self.context.channel_state.clear_waiting_for_batch();
3025 }
3027 /// Unsets the existing funding information.
3029 /// This must only be used if the channel has not yet completed funding and has not been used.
3031 /// Further, the channel must be immediately shut down after this with a call to
3032 /// [`ChannelContext::force_shutdown`].
3033 pub fn unset_funding_info(&mut self, temporary_channel_id: ChannelId) {
3034 debug_assert!(matches!(
3035 self.context.channel_state, ChannelState::AwaitingChannelReady(_)
3036 ));
3037 self.context.channel_transaction_parameters.funding_outpoint = None;
3038 self.context.channel_id = temporary_channel_id;
3039 }
3041 /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
3042 /// and the channel is now usable (and public), this may generate an announcement_signatures to
3043 /// send.
3044 pub fn channel_ready<NS: Deref, L: Deref>(
3045 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
3046 user_config: &UserConfig, best_block: &BestBlock, logger: &L
3047 ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
3048 where
3049 NS::Target: NodeSigner,
3050 L::Target: Logger
3051 {
3052 if self.context.channel_state.is_peer_disconnected() {
3053 self.context.workaround_lnd_bug_4006 = Some(msg.clone());
3054 return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
3057 if let Some(scid_alias) = msg.short_channel_id_alias {
3058 if Some(scid_alias) != self.context.short_channel_id {
3059 // The scid alias provided can be used to route payments *from* our counterparty,
3060 // i.e. can be used for inbound payments and provided in invoices, but is not used
3061 // when routing outbound payments.
3062 self.context.latest_inbound_scid_alias = Some(scid_alias);
3063 }
3064 }
3066 // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
3067 // batch, but we can receive channel_ready messages.
3068 let mut check_reconnection = false;
3069 match &self.context.channel_state {
3070 ChannelState::AwaitingChannelReady(flags) => {
3071 let flags = flags.clone().clear(FundedStateFlags::ALL.into());
3072 debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
3073 if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
3074 // If we reconnected before sending our `channel_ready` they may still resend theirs.
3075 check_reconnection = true;
3076 } else if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
3077 self.context.channel_state.set_their_channel_ready();
3078 } else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
3079 self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
3080 self.context.update_time_counter += 1;
3081 } else {
3082 // We're in `WAITING_FOR_BATCH`, so we should wait until we're ready.
3083 debug_assert!(flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
3084 }
3085 },
3086 // If we reconnected before sending our `channel_ready` they may still resend theirs.
3087 ChannelState::ChannelReady(_) => check_reconnection = true,
3088 _ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
3089 }
3090 if check_reconnection {
3091 // They probably disconnected/reconnected and re-sent the channel_ready, which is
3092 // required, or they're sending a fresh SCID alias.
3093 let expected_point =
3094 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
3095 // If they haven't ever sent an updated point, the point they send should match
3096 // the current one.
3097 self.context.counterparty_cur_commitment_point
3098 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
3099 // If we've advanced the commitment number once, the second commitment point is
3100 // at `counterparty_prev_commitment_point`, which is not yet revoked.
3101 debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
3102 self.context.counterparty_prev_commitment_point
3103 } else {
3104 // If they have sent updated points, channel_ready is always supposed to match
3105 // their "first" point, which we re-derive here.
3106 Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
3107 &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
3108 ).expect("We already advanced, so previous secret keys should have been validated already")))
3109 };
3110 if expected_point != Some(msg.next_per_commitment_point) {
3111 return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
3112 }
3113 return Ok(None);
3114 }
3116 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3117 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3119 log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
3121 Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
3122 }
3124 pub fn update_add_htlc<F, FE: Deref, L: Deref>(
3125 &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
3126 create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
3127 ) -> Result<(), ChannelError>
3128 where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
3129 FE::Target: FeeEstimator, L::Target: Logger,
3130 {
3131 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3132 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
3133 }
3134 // We can't accept HTLCs sent after we've sent a shutdown.
3135 if self.context.channel_state.is_local_shutdown_sent() {
3136 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
3137 }
3138 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
3139 if self.context.channel_state.is_remote_shutdown_sent() {
3140 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
3141 }
3142 if self.context.channel_state.is_peer_disconnected() {
3143 return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
3144 }
3145 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
3146 return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
3147 }
3148 if msg.amount_msat == 0 {
3149 return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
3150 }
3151 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
3152 return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
3153 }
3155 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
3156 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
3157 if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
3158 return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
3159 }
3160 if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
3161 return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
3162 }
3164 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
3165 // the reserve_satoshis we told them to always have as direct payment so that they lose
3166 // something if we punish them for broadcasting an old state).
3167 // Note that we don't really care about having a small/no to_remote output in our local
3168 // commitment transactions, as the purpose of the channel reserve is to ensure we can
3169 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
3170 // present in the next commitment transaction we send them (at least for fulfilled ones,
3171 // failed ones won't modify value_to_self).
3172 // Note that we will send HTLCs which another instance of rust-lightning would think
3173 // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
3174 // Channel state once they will not be present in the next received commitment
3175 // transaction).
3176 let mut removed_outbound_total_msat = 0;
3177 for ref htlc in self.context.pending_outbound_htlcs.iter() {
3178 if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
3179 removed_outbound_total_msat += htlc.amount_msat;
3180 } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
3181 removed_outbound_total_msat += htlc.amount_msat;
3182 }
3183 }
3185 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
3186 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3187 (0, 0)
3188 } else {
3189 let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
3190 (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
3191 dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
3192 };
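// Worked example (sketch, using this file's non-anchor weights of 663 for HTLC-timeout and
// 703 for HTLC-success): at a dust buffer feerate of 1000 sat/kW, an HTLC below
// 663 + counterparty_dust_limit_satoshis sats is dust on the counterparty's commitment tx,
// and its full value counts toward the max_dust_htlc_exposure_msat checks below.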
3193 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
3194 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
3195 let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
3196 if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
3197 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
3198 on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
3199 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3200 }
3201 }
3203 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
3204 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
3205 let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
3206 if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
3207 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
3208 on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
3209 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3210 }
3211 }
3213 let pending_value_to_self_msat =
3214 self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
3215 let pending_remote_value_msat =
3216 self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
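// Worked example (sketch): on a 1_000_000 sat channel with value_to_self_msat =
// 600_000_000 and 50_000_000 msat of pending inbound HTLCs, pending_remote_value_msat is
// 1_000_000_000 - 650_000_000 = 350_000_000; an update_add_htlc above that is an overdraw
// and fails the channel below.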
3217 if pending_remote_value_msat < msg.amount_msat {
3218 return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
3221 // Check that the remote can afford to pay for this HTLC on-chain at the current
3222 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
3223 {
3224 let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
3225 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3226 self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
3227 };
3228 let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3229 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3230 } else {
3231 0
3232 };
3233 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
3234 return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
3235 }
3236 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
3237 return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
3238 }
3239 }
3241 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3242 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3243 } else {
3244 0
3245 };
3246 if !self.context.is_outbound() {
3247 // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
3248 // the spec because the fee spike buffer requirement doesn't exist on the receiver's
3249 // side, only on the sender's. Note that with anchor outputs we are no longer as
3250 // sensitive to fee spikes, but we do still account for the two anchor outputs' value.
3251 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3252 let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
3253 if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3254 remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
3255 }
3256 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
3257 // Note that if the pending_forward_status is not updated here, then it's because we're already failing
3258 // the HTLC, i.e. its status is already set to failing.
3259 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
3260 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3261 }
3262 } else {
3263 // Check that they won't violate our local required channel reserve by adding this HTLC.
3264 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3265 let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
3266 if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
3267 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
3268 }
3269 }
3270 if self.context.next_counterparty_htlc_id != msg.htlc_id {
3271 return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
3272 }
3273 if msg.cltv_expiry >= 500000000 {
3274 return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
3275 }
3277 if self.context.channel_state.is_local_shutdown_sent() {
3278 if let PendingHTLCStatus::Forward(_) = pending_forward_status {
3279 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
3280 }
3281 }
3283 // Now update local state:
3284 self.context.next_counterparty_htlc_id += 1;
3285 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
3286 htlc_id: msg.htlc_id,
3287 amount_msat: msg.amount_msat,
3288 payment_hash: msg.payment_hash,
3289 cltv_expiry: msg.cltv_expiry,
3290 state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
3291 });
3292 Ok(())
3293 }
3295 /// Marks an outbound HTLC for which we have received an update_fail/fulfill/malformed message.
3296 #[inline]
3297 fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
3298 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
3299 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3300 if htlc.htlc_id == htlc_id {
3301 let outcome = match check_preimage {
3302 None => fail_reason.into(),
3303 Some(payment_preimage) => {
3304 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
3305 if payment_hash != htlc.payment_hash {
3306 return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
3308 OutboundHTLCOutcome::Success(Some(payment_preimage))
3312 OutboundHTLCState::LocalAnnounced(_) =>
3313 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
3314 OutboundHTLCState::Committed => {
3315 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
3317 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
3318 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
3323 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
3326 pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
3327 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3328 return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
3329 }
3330 if self.context.channel_state.is_peer_disconnected() {
3331 return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
3332 }
3334 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
3335 }
3337 pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3338 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3339 return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
3340 }
3341 if self.context.channel_state.is_peer_disconnected() {
3342 return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
3343 }
3345 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3346 Ok(())
3347 }
3349 pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3350 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3351 return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
3352 }
3353 if self.context.channel_state.is_peer_disconnected() {
3354 return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
3355 }
3357 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3358 Ok(())
3359 }
3361 pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
3362 where L::Target: Logger
3363 {
3364 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3365 return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
3366 }
3367 if self.context.channel_state.is_peer_disconnected() {
3368 return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
3369 }
3370 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
3371 return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
3372 }
3374 let funding_script = self.context.get_funding_redeemscript();
3376 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3378 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
3379 let commitment_txid = {
3380 let trusted_tx = commitment_stats.tx.trust();
3381 let bitcoin_tx = trusted_tx.built_transaction();
3382 let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
3384 log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
3385 log_bytes!(msg.signature.serialize_compact()[..]),
3386 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
3387 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
3388 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
3389 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
3390 }
3391 bitcoin_tx.txid
3392 };
3393 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
3395 // If our counterparty updated the channel fee in this commitment transaction, check that
3396 // they can actually afford the new fee now.
3397 let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
3398 update_state == FeeUpdateState::RemoteAnnounced
3399 } else { false };
3400 if update_fee {
3401 debug_assert!(!self.context.is_outbound());
3402 let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
3403 if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
3404 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
3405 }
3406 }
3407 #[cfg(any(test, fuzzing))]
3408 {
3409 if self.context.is_outbound() {
3410 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
3411 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3412 if let Some(info) = projected_commit_tx_info {
3413 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
3414 + self.context.holding_cell_htlc_updates.len();
3415 if info.total_pending_htlcs == total_pending_htlcs
3416 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
3417 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
3418 && info.feerate == self.context.feerate_per_kw {
3419 assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
3420 }
3421 }
3422 }
3423 }
3425 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
3426 return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
3429 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
3430 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
3431 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
3432 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
3433 // backwards compatibility, we never use it in production. To provide test coverage, here,
3434 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
3435 #[allow(unused_assignments, unused_mut)]
3436 let mut separate_nondust_htlc_sources = false;
3437 #[cfg(all(feature = "std", any(test, fuzzing)))] {
3438 use core::hash::{BuildHasher, Hasher};
3439 // Get a random value using the only std API to do so - the DefaultHasher
3440 let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
3441 separate_nondust_htlc_sources = rand_val % 2 == 0;
3442 }
3444 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
3445 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
3446 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
3447 if let Some(_) = htlc.transaction_output_index {
3448 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
3449 self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
3450 &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
3452 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
3453 let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
3454 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
3455 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
3456 log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
3457 encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
3458 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
3459 return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
3461 if !separate_nondust_htlc_sources {
3462 htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
3465 htlcs_and_sigs.push((htlc, None, source_opt.take()));
3467 if separate_nondust_htlc_sources {
3468 if let Some(source) = source_opt.take() {
3469 nondust_htlc_sources.push(source);
3472 debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
3475 let holder_commitment_tx = HolderCommitmentTransaction::new(
3476 commitment_stats.tx,
3477 msg.signature,
3478 msg.htlc_signatures.clone(),
3479 &self.context.get_holder_pubkeys().funding_pubkey,
3480 self.context.counterparty_funding_pubkey()
3481 );
3483 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
3484 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
3486 // Update state now that we've passed all the can-fail calls...
3487 let mut need_commitment = false;
3488 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
3489 if *update_state == FeeUpdateState::RemoteAnnounced {
3490 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
3491 need_commitment = true;
3492 }
3493 }
3495 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
3496 let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
3497 Some(forward_info.clone())
3498 } else { None };
3499 if let Some(forward_info) = new_forward {
3500 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
3501 &htlc.payment_hash, &self.context.channel_id);
3502 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
3503 need_commitment = true;
3504 }
3505 }
3506 let mut claimed_htlcs = Vec::new();
3507 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3508 if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
3509 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
3510 &htlc.payment_hash, &self.context.channel_id);
3511 // Grab the preimage, if it exists, instead of cloning
3512 let mut reason = OutboundHTLCOutcome::Success(None);
3513 mem::swap(outcome, &mut reason);
3514 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
3515 // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
3516 // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
3517 // have a `Success(None)` reason. In this case we could forget some HTLC
3518 // claims, but such an upgrade is unlikely and including claimed HTLCs here
3519 // fixes a bug which the user was exposed to on 0.0.104 when they started the
3520 // claim prior to upgrading.
3521 claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
3522 }
3523 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
3524 need_commitment = true;
3525 }
3526 }
3528 self.context.latest_monitor_update_id += 1;
3529 let mut monitor_update = ChannelMonitorUpdate {
3530 update_id: self.context.latest_monitor_update_id,
3531 counterparty_node_id: Some(self.context.counterparty_node_id),
3532 updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
3533 commitment_tx: holder_commitment_tx,
3534 htlc_outputs: htlcs_and_sigs,
3535 claimed_htlcs,
3536 nondust_htlc_sources,
3537 }],
3538 };
3540 self.context.cur_holder_commitment_transaction_number -= 1;
3541 self.context.expecting_peer_commitment_signed = false;
3542 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
3543 // build_commitment_no_status_check() next which will reset this to RAAFirst.
3544 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
3546 if self.context.channel_state.is_monitor_update_in_progress() {
3547 // In case we initially failed monitor updating without requiring a response, we need
3548 // to make sure the RAA gets sent first.
3549 self.context.monitor_pending_revoke_and_ack = true;
3550 if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
3551 // If we were going to send a commitment_signed after the RAA, go ahead and do all
3552 // the corresponding HTLC status updates so that
3553 // get_last_commitment_update_for_send includes the right HTLCs.
3554 self.context.monitor_pending_commitment_signed = true;
3555 let mut additional_update = self.build_commitment_no_status_check(logger);
3556 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3557 // strictly increasing by one, so decrement it here.
3558 self.context.latest_monitor_update_id = monitor_update.update_id;
3559 monitor_update.updates.append(&mut additional_update.updates);
3561 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
3562 &self.context.channel_id);
3563 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3564 }
3566 let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
3567 // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
3568 // we'll send one right away when we get the revoke_and_ack when we
3569 // free_holding_cell_htlcs().
3570 let mut additional_update = self.build_commitment_no_status_check(logger);
3571 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3572 // strictly increasing by one, so decrement it here.
3573 self.context.latest_monitor_update_id = monitor_update.update_id;
3574 monitor_update.updates.append(&mut additional_update.updates);
3575 true
3576 } else { false };
3578 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
3579 &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
3580 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
3581 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3582 }
3584 /// Public version of the below, checking relevant preconditions first.
3585 /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
3586 /// returns `(None, Vec::new())`.
3587 pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
3588 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3589 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3590 where F::Target: FeeEstimator, L::Target: Logger
3591 {
3592 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.channel_state.can_generate_new_commitment() {
3593 self.free_holding_cell_htlcs(fee_estimator, logger)
3594 } else { (None, Vec::new()) }
3595 }
3597 /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3598 /// for our counterparty.
3599 fn free_holding_cell_htlcs<F: Deref, L: Deref>(
3600 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3601 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3602 where F::Target: FeeEstimator, L::Target: Logger
3603 {
3604 assert!(!self.context.channel_state.is_monitor_update_in_progress());
3605 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3606 log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3607 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
3609 let mut monitor_update = ChannelMonitorUpdate {
3610 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
3611 counterparty_node_id: Some(self.context.counterparty_node_id),
3612 updates: Vec::new(),
3613 };
3615 let mut htlc_updates = Vec::new();
3616 mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3617 let mut update_add_count = 0;
3618 let mut update_fulfill_count = 0;
3619 let mut update_fail_count = 0;
3620 let mut htlcs_to_fail = Vec::new();
3621 for htlc_update in htlc_updates.drain(..) {
3622 // Note that this *can* fail, though it should be due to rather-rare conditions on
3623 // fee races with adding too many outputs which push our total payments just over
3624 // the limit. In case it's less rare than I anticipate, we may want to revisit
3625 // handling this case better and maybe fulfilling some of the HTLCs while attempting
3626 // to rebalance channels.
3627 let fail_htlc_res = match &htlc_update {
3628 &HTLCUpdateAwaitingACK::AddHTLC {
3629 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
3630 skimmed_fee_msat, blinding_point, ..
3631 } => {
3632 match self.send_htlc(
3633 amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
3634 false, skimmed_fee_msat, blinding_point, fee_estimator, logger
3635 ) {
3636 Ok(_) => update_add_count += 1,
3637 Err(e) => {
3638 match e {
3639 ChannelError::Ignore(ref msg) => {
3640 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
3641 // If we fail to send here, then this HTLC should
3642 // be failed backwards. Failing to send here
3643 // indicates that this HTLC may keep being put back
3644 // into the holding cell without ever being
3645 // successfully forwarded/failed/fulfilled, causing
3646 // our counterparty to eventually close on us.
3647 htlcs_to_fail.push((source.clone(), *payment_hash));
3650 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
3657 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
3658 // If an HTLC claim was previously added to the holding cell (via
3659 // `get_update_fulfill_htlc`), then generating the claim message itself must
3660 // not fail - any in-between attempts to claim the HTLC will have resulted
3661 // in it hitting the holding cell again, and we cannot change the state of a
3662 // holding cell HTLC from fulfill to anything else.
3663 let mut additional_monitor_update =
3664 if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
3665 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
3666 { monitor_update } else { unreachable!() };
3667 update_fulfill_count += 1;
3668 monitor_update.updates.append(&mut additional_monitor_update.updates);
3671 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
3672 Some(self.fail_htlc(htlc_id, err_packet.clone(), false, logger)
3673 .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
3675 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
3676 Some(self.fail_htlc(htlc_id, (sha256_of_onion, failure_code), false, logger)
3677 .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
3680 if let Some(res) = fail_htlc_res {
3682 Ok(fail_msg_opt) => {
3683 // If an HTLC failure was previously added to the holding cell (via
3684 // `queue_fail_{malformed_}htlc`) then generating the fail message itself must
3685 // not fail - we should never end up in a state where we double-fail
3686 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
3687 // for a full revocation before failing.
3688 debug_assert!(fail_msg_opt.is_some());
3689 update_fail_count += 1;
3691 Err(ChannelError::Ignore(_)) => {},
3693 panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
3698 if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
3699 return (None, htlcs_to_fail);
3701 let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
3702 self.send_update_fee(feerate, false, fee_estimator, logger)
3707 let mut additional_update = self.build_commitment_no_status_check(logger);
3708 // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
3709 // but we want them to be strictly increasing by one, so reset it here.
3710 self.context.latest_monitor_update_id = monitor_update.update_id;
3711 monitor_update.updates.append(&mut additional_update.updates);
3713 log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
3714 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
3715 update_add_count, update_fulfill_count, update_fail_count);
3717 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
3718 (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
3724 /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
3725 /// commitment_signed message here in case we had pending outbound HTLCs to add which were
3726 /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
3727 /// generating an appropriate error *after* the channel state has been updated based on the
3728 /// revoke_and_ack message.
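///
/// A sketch of consuming the result (illustrative only; `channel`, `msg`,
/// `fee_estimator`, and `logger` are assumed in scope):
/// ```ignore
/// let (htlcs_to_fail, monitor_update_opt) =
///     channel.revoke_and_ack(&msg, &fee_estimator, &logger, false)?;
/// // Fail `htlcs_to_fail` backwards, and persist `monitor_update_opt` (if any)
/// // before making further progress on the channel.
/// ```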
3729 pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
3730 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
3731 ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
3732 where F::Target: FeeEstimator, L::Target: Logger,
3734 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3735 return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
3737 if self.context.channel_state.is_peer_disconnected() {
3738 return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
3740 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
3741 return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
3744 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
3746 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
3747 if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
3748 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
3752 if !self.context.channel_state.is_awaiting_remote_revoke() {
3753 // Our counterparty seems to have burned their coins to us (by revoking a state when we
3754 // haven't given them a new commitment transaction to broadcast). We should probably
3755 // take advantage of this by updating our channel monitor, sending them an error, and
3756 // waiting for them to broadcast their latest (now-revoked) claim. But that would be a
3757 // lot of work, and there's some chance this is all a misunderstanding anyway.
3758 // We have to do *something*, though, since our signer may get mad at us for otherwise
3759 // jumping a remote commitment number, so best to just force-close and move on.
3760 return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
3763 #[cfg(any(test, fuzzing))]
3765 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
3766 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3769 match &self.context.holder_signer {
3770 ChannelSignerType::Ecdsa(ecdsa) => {
3771 ecdsa.validate_counterparty_revocation(
3772 self.context.cur_counterparty_commitment_transaction_number + 1,
3774 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
3776 // TODO (taproot|arik)
3781 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
3782 .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
3783 self.context.latest_monitor_update_id += 1;
3784 let mut monitor_update = ChannelMonitorUpdate {
3785 update_id: self.context.latest_monitor_update_id,
3786 counterparty_node_id: Some(self.context.counterparty_node_id),
3787 updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
3788 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
3789 secret: msg.per_commitment_secret,
3793 // Update state now that we've passed all the can-fail calls...
3794 // (Note that we may still fail to generate the new commitment_signed message, but that's
3795 // OK: we step the channel here and *then*, if the new generation fails, we can fail the
3796 // channel based on that - stepping things forward here should be safe either way.)
3797 self.context.channel_state.clear_awaiting_remote_revoke();
3798 self.context.sent_message_awaiting_response = None;
3799 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3800 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3801 self.context.cur_counterparty_commitment_transaction_number -= 1;
3803 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3804 self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
3807 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
3808 let mut to_forward_infos = Vec::new();
3809 let mut revoked_htlcs = Vec::new();
3810 let mut finalized_claimed_htlcs = Vec::new();
3811 let mut update_fail_htlcs = Vec::new();
3812 let mut update_fail_malformed_htlcs = Vec::new();
3813 let mut require_commitment = false;
3814 let mut value_to_self_msat_diff: i64 = 0;
3817 // Take references explicitly so that we can hold multiple references to self.context.
3818 let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
3819 let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
3820 let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;
3822 // We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
3823 pending_inbound_htlcs.retain(|htlc| {
3824 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3825 log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
3826 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3827 value_to_self_msat_diff += htlc.amount_msat as i64;
3829 *expecting_peer_commitment_signed = true;
3833 pending_outbound_htlcs.retain(|htlc| {
3834 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
3835 log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
3836 if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
3837 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
3839 finalized_claimed_htlcs.push(htlc.source.clone());
3840 // They fulfilled, so we sent them money
3841 value_to_self_msat_diff -= htlc.amount_msat as i64;
3846 for htlc in pending_inbound_htlcs.iter_mut() {
3847 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
3849 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
3853 let mut state = InboundHTLCState::Committed;
3854 mem::swap(&mut state, &mut htlc.state);
3856 if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
3857 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
3858 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
3859 require_commitment = true;
3860 } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
3861 match forward_info {
3862 PendingHTLCStatus::Fail(fail_msg) => {
3863 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
3864 require_commitment = true;
3866 HTLCFailureMsg::Relay(msg) => {
3867 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
3868 update_fail_htlcs.push(msg)
3870 HTLCFailureMsg::Malformed(msg) => {
3871 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
3872 update_fail_malformed_htlcs.push(msg)
3876 PendingHTLCStatus::Forward(forward_info) => {
3877 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
3878 to_forward_infos.push((forward_info, htlc.htlc_id));
3879 htlc.state = InboundHTLCState::Committed;
3885 for htlc in pending_outbound_htlcs.iter_mut() {
3886 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
3887 log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
3888 htlc.state = OutboundHTLCState::Committed;
3889 *expecting_peer_commitment_signed = true;
3891 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
3892 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
3893 // Grab the preimage, if it exists, instead of cloning
3894 let mut reason = OutboundHTLCOutcome::Success(None);
3895 mem::swap(outcome, &mut reason);
3896 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
3897 require_commitment = true;
3901 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
3903 if let Some((feerate, update_state)) = self.context.pending_update_fee {
3904 match update_state {
3905 FeeUpdateState::Outbound => {
3906 debug_assert!(self.context.is_outbound());
3907 log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
3908 self.context.feerate_per_kw = feerate;
3909 self.context.pending_update_fee = None;
3910 self.context.expecting_peer_commitment_signed = true;
3912 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
3913 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
3914 debug_assert!(!self.context.is_outbound());
3915 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
3916 require_commitment = true;
3917 self.context.feerate_per_kw = feerate;
3918 self.context.pending_update_fee = None;
3923 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
3924 let release_state_str =
3925 if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
3926 macro_rules! return_with_htlcs_to_fail {
3927 ($htlcs_to_fail: expr) => {
3928 if !release_monitor {
3929 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
3930 update: monitor_update,
3932 return Ok(($htlcs_to_fail, None));
3934 return Ok(($htlcs_to_fail, Some(monitor_update)));
3939 if self.context.channel_state.is_monitor_update_in_progress() {
3940 // We can't actually generate a new commitment transaction (including by freeing holding
3941 // cells) while we can't update the monitor, so we just return what we have.
3942 if require_commitment {
3943 self.context.monitor_pending_commitment_signed = true;
3944 // When the monitor updating is restored we'll call
3945 // get_last_commitment_update_for_send(), which does not update state, but we're
3946 // definitely now awaiting a remote revoke before we can step forward any more, so
3947 // set it here.
3948 let mut additional_update = self.build_commitment_no_status_check(logger);
3949 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3950 // strictly increasing by one, so reset it here.
3951 self.context.latest_monitor_update_id = monitor_update.update_id;
3952 monitor_update.updates.append(&mut additional_update.updates);
3954 self.context.monitor_pending_forwards.append(&mut to_forward_infos);
3955 self.context.monitor_pending_failures.append(&mut revoked_htlcs);
3956 self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
3957 log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
3958 return_with_htlcs_to_fail!(Vec::new());
3961 match self.free_holding_cell_htlcs(fee_estimator, logger) {
3962 (Some(mut additional_update), htlcs_to_fail) => {
3963 // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
3964 // strictly increasing by one, so reset it here.
3965 self.context.latest_monitor_update_id = monitor_update.update_id;
3966 monitor_update.updates.append(&mut additional_update.updates);
3968 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
3969 &self.context.channel_id(), release_state_str);
3971 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3972 return_with_htlcs_to_fail!(htlcs_to_fail);
3974 (None, htlcs_to_fail) => {
3975 if require_commitment {
3976 let mut additional_update = self.build_commitment_no_status_check(logger);
3978 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3979 // strictly increasing by one, so reset it here.
3980 self.context.latest_monitor_update_id = monitor_update.update_id;
3981 monitor_update.updates.append(&mut additional_update.updates);
3983 log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
3984 &self.context.channel_id(),
3985 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
3988 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3989 return_with_htlcs_to_fail!(htlcs_to_fail);
3991 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
3992 &self.context.channel_id(), release_state_str);
3994 self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3995 return_with_htlcs_to_fail!(htlcs_to_fail);
4001 /// Queues up an outbound update fee by placing it in the holding cell. You should call
4002 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
4003 /// commitment update.
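///
/// Illustrative call sequence (a sketch; `channel`, `new_feerate_per_kw`,
/// `fee_estimator`, and `logger` are assumed in scope):
/// ```ignore
/// channel.queue_update_fee(new_feerate_per_kw, &fee_estimator, &logger);
/// // The fee update sits in the holding cell until it is explicitly freed:
/// let (monitor_update_opt, failed_htlcs) =
///     channel.maybe_free_holding_cell_htlcs(&fee_estimator, &logger);
/// ```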
4004 pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
4005 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4006 where F::Target: FeeEstimator, L::Target: Logger
4008 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
4009 assert!(msg_opt.is_none(), "We forced holding cell?");
4012 /// Adds a pending fee update to this channel. See the doc for send_htlc for
4013 /// further details on the optionality of the return value.
4014 /// If our balance is too low to cover the cost of the next commitment transaction at the
4015 /// new feerate, the update is cancelled.
4017 /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
4018 /// [`Channel`] if `force_holding_cell` is false.
4019 fn send_update_fee<F: Deref, L: Deref>(
4020 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
4021 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
4022 ) -> Option<msgs::UpdateFee>
4023 where F::Target: FeeEstimator, L::Target: Logger
4025 if !self.context.is_outbound() {
4026 panic!("Cannot send fee from inbound channel");
4028 if !self.context.is_usable() {
4029 panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
4031 if !self.context.is_live() {
4032 panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
4035 // Before proposing a feerate update, check that we can actually afford the new fee.
4036 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
4037 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
4038 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
4039 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
4040 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
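// Illustrative arithmetic for the buffer fee above (assumed values): with 3 non-dust
// HTLCs on our commitment transaction, 1 holding-cell HTLC, and a
// CONCURRENT_INBOUND_HTLC_FEE_BUFFER of 2, we price a commitment carrying
// 3 + 1 + 2 = 6 HTLC outputs at the proposed feerate; the trailing `* 1000`
// converts sats to msats.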
4041 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
4042 if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
4043 //TODO: auto-close after a number of failures?
4044 log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
4048 // Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
4049 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4050 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4051 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4052 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4053 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
4056 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4057 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
4061 if self.context.channel_state.is_awaiting_remote_revoke() || self.context.channel_state.is_monitor_update_in_progress() {
4062 force_holding_cell = true;
4065 if force_holding_cell {
4066 self.context.holding_cell_update_fee = Some(feerate_per_kw);
4070 debug_assert!(self.context.pending_update_fee.is_none());
4071 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
4073 Some(msgs::UpdateFee {
4074 channel_id: self.context.channel_id,
4079 /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
4080 /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
4081 /// resent.
4082 /// No further message handling calls may be made until a channel_reestablish dance has
4083 /// completed.
4084 /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
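///
/// Typically driven from a peer-disconnection handler (a sketch; `channel` and
/// `logger` are assumed in scope):
/// ```ignore
/// if channel.remove_uncommitted_htlcs_and_mark_paused(&logger).is_err() {
///     // Per the note above, the channel must now be force-shutdown.
/// }
/// ```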
4085 pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
4086 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
4087 if self.context.channel_state.is_pre_funded_state() {
4091 if self.context.channel_state.is_peer_disconnected() {
4092 // While the below code should be idempotent, it's simpler to just return early, as
4093 // redundant disconnect events can fire, though they should be rare.
4097 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
4098 self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
4101 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
4102 // will be retransmitted.
4103 self.context.last_sent_closing_fee = None;
4104 self.context.pending_counterparty_closing_signed = None;
4105 self.context.closing_fee_limits = None;
4107 let mut inbound_drop_count = 0;
4108 self.context.pending_inbound_htlcs.retain(|htlc| {
4110 InboundHTLCState::RemoteAnnounced(_) => {
4111 // They sent us an update_add_htlc but we never got the commitment_signed.
4112 // We'll tell them what commitment_signed we're expecting next and they'll drop
4113 // this HTLC accordingly
4114 inbound_drop_count += 1;
4117 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
4118 // We received a commitment_signed updating this HTLC and (at least hopefully)
4119 // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
4120 // in response to it yet, so don't touch it.
4123 InboundHTLCState::Committed => true,
4124 InboundHTLCState::LocalRemoved(_) => {
4125 // We (hopefully) sent a commitment_signed updating this HTLC (which we can
4126 // re-transmit if needed) and they may have even sent a revoke_and_ack back
4127 // (that we missed). Keep this around for now and if they tell us they missed
4128 // the commitment_signed we can re-transmit the update then.
4133 self.context.next_counterparty_htlc_id -= inbound_drop_count;
4135 if let Some((_, update_state)) = self.context.pending_update_fee {
4136 if update_state == FeeUpdateState::RemoteAnnounced {
4137 debug_assert!(!self.context.is_outbound());
4138 self.context.pending_update_fee = None;
4142 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
4143 if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
4144 // They sent us an update to remove this but haven't yet sent the corresponding
4145 // commitment_signed, we need to move it back to Committed and they can re-send
4146 // the update upon reconnection.
4147 htlc.state = OutboundHTLCState::Committed;
4151 self.context.sent_message_awaiting_response = None;
4153 self.context.channel_state.set_peer_disconnected();
4154 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
4158 /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
4159 /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
4160 /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
4161 /// update completes (potentially immediately).
4162 /// The messages which were generated with the monitor update must *not* have been sent to the
4163 /// remote end, and must instead have been dropped. They will be regenerated when
4164 /// [`Self::monitor_updating_restored`] is called.
4166 /// [`ChannelManager`]: super::channelmanager::ChannelManager
4167 /// [`chain::Watch`]: crate::chain::Watch
4168 /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
4169 fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
4170 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
4171 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
4172 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
4174 self.context.monitor_pending_revoke_and_ack |= resend_raa;
4175 self.context.monitor_pending_commitment_signed |= resend_commitment;
4176 self.context.monitor_pending_channel_ready |= resend_channel_ready;
4177 self.context.monitor_pending_forwards.append(&mut pending_forwards);
4178 self.context.monitor_pending_failures.append(&mut pending_fails);
4179 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
4180 self.context.channel_state.set_monitor_update_in_progress();
4183 /// Indicates that the latest ChannelMonitor update has been committed by the client
4184 /// successfully and we should restore normal operation. Returns messages which should be sent
4185 /// to the remote side.
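///
/// A sketch of consuming the returned [`MonitorRestoreUpdates`] (illustrative only;
/// field handling is abbreviated):
/// ```ignore
/// let updates = channel.monitor_updating_restored(
///     &logger, &node_signer, chain_hash, &user_config, best_block_height);
/// // Re-send `updates.raa` and `updates.commitment_update` in `updates.order`,
/// // forward `updates.accepted_htlcs`, fail `updates.failed_htlcs` backwards, and
/// // broadcast `updates.funding_broadcastable` if present.
/// ```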
4186 pub fn monitor_updating_restored<L: Deref, NS: Deref>(
4187 &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
4188 user_config: &UserConfig, best_block_height: u32
4189 ) -> MonitorRestoreUpdates
4192 NS::Target: NodeSigner
4194 assert!(self.context.channel_state.is_monitor_update_in_progress());
4195 self.context.channel_state.clear_monitor_update_in_progress();
4197 // If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to
4198 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
4199 // first received the funding_signed.
4200 let mut funding_broadcastable =
4201 if self.context.is_outbound() &&
4202 (matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
4203 matches!(self.context.channel_state, ChannelState::ChannelReady(_)))
4205 self.context.funding_transaction.take()
4207 // That said, if the funding transaction is already confirmed (i.e. we're active with a
4208 // minimum_depth over 0), don't bother re-broadcasting the confirmed funding tx.
4209 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.minimum_depth != Some(0) {
4210 funding_broadcastable = None;
4213 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
4214 // (and we assume the user never directly broadcasts the funding transaction and waits for
4215 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
4216 // * an inbound channel that failed to persist the monitor on funding_created and we got
4217 // the funding transaction confirmed before the monitor was persisted, or
4218 // * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
4219 let channel_ready = if self.context.monitor_pending_channel_ready {
4220 assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
4221 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
4222 self.context.monitor_pending_channel_ready = false;
4223 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4224 Some(msgs::ChannelReady {
4225 channel_id: self.context.channel_id(),
4226 next_per_commitment_point,
4227 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4231 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
4233 let mut accepted_htlcs = Vec::new();
4234 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
4235 let mut failed_htlcs = Vec::new();
4236 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
4237 let mut finalized_claimed_htlcs = Vec::new();
4238 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
4240 if self.context.channel_state.is_peer_disconnected() {
4241 self.context.monitor_pending_revoke_and_ack = false;
4242 self.context.monitor_pending_commitment_signed = false;
4243 return MonitorRestoreUpdates {
4244 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
4245 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4249 let raa = if self.context.monitor_pending_revoke_and_ack {
4250 Some(self.get_last_revoke_and_ack())
4252 let commitment_update = if self.context.monitor_pending_commitment_signed {
4253 self.get_last_commitment_update_for_send(logger).ok()
4255 if commitment_update.is_some() {
4256 self.mark_awaiting_response();
4259 self.context.monitor_pending_revoke_and_ack = false;
4260 self.context.monitor_pending_commitment_signed = false;
4261 let order = self.context.resend_order.clone();
4262 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
4263 &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
4264 if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
4265 match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
4266 MonitorRestoreUpdates {
4267 raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4271 pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
4272 where F::Target: FeeEstimator, L::Target: Logger
4274 if self.context.is_outbound() {
4275 return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
4277 if self.context.channel_state.is_peer_disconnected() {
4278 return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
4280 Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
4282 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
4283 self.context.update_time_counter += 1;
4284 // Check that we won't be pushed over our dust exposure limit by the feerate increase.
4285 if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
4286 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
4287 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
4288 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4289 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4290 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4291 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4292 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
4293 msg.feerate_per_kw, holder_tx_dust_exposure)));
4295 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4296 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
4297 msg.feerate_per_kw, counterparty_tx_dust_exposure)));
4303 /// Indicates that the signer may have some signatures for us, so we should retry if we're
4304 /// pending.
4305 #[cfg(async_signing)]
4306 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
4307 let commitment_update = if self.context.signer_pending_commitment_update {
4308 self.get_last_commitment_update_for_send(logger).ok()
4310 let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
4311 self.context.get_funding_signed_msg(logger).1
4313 let channel_ready = if funding_signed.is_some() {
4314 self.check_get_channel_ready(0)
4317 log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
4318 if commitment_update.is_some() { "a" } else { "no" },
4319 if funding_signed.is_some() { "a" } else { "no" },
4320 if channel_ready.is_some() { "a" } else { "no" });
4322 SignerResumeUpdates {
4329 fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
4330 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4331 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
4332 msgs::RevokeAndACK {
4333 channel_id: self.context.channel_id,
4334 per_commitment_secret,
4335 next_per_commitment_point,
4337 next_local_nonce: None,
4341 /// Gets the last commitment update for immediate sending to our peer.
4342 fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
4343 let mut update_add_htlcs = Vec::new();
4344 let mut update_fulfill_htlcs = Vec::new();
4345 let mut update_fail_htlcs = Vec::new();
4346 let mut update_fail_malformed_htlcs = Vec::new();
4348 for htlc in self.context.pending_outbound_htlcs.iter() {
4349 if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
4350 update_add_htlcs.push(msgs::UpdateAddHTLC {
4351 channel_id: self.context.channel_id(),
4352 htlc_id: htlc.htlc_id,
4353 amount_msat: htlc.amount_msat,
4354 payment_hash: htlc.payment_hash,
4355 cltv_expiry: htlc.cltv_expiry,
4356 onion_routing_packet: (**onion_packet).clone(),
4357 skimmed_fee_msat: htlc.skimmed_fee_msat,
4358 blinding_point: htlc.blinding_point,
4363 for htlc in self.context.pending_inbound_htlcs.iter() {
4364 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
4366 &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
4367 update_fail_htlcs.push(msgs::UpdateFailHTLC {
4368 channel_id: self.context.channel_id(),
4369 htlc_id: htlc.htlc_id,
4370 reason: err_packet.clone()
4373 &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
4374 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
4375 channel_id: self.context.channel_id(),
4376 htlc_id: htlc.htlc_id,
4377 sha256_of_onion: sha256_of_onion.clone(),
4378 failure_code: failure_code.clone(),
4381 &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
4382 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
4383 channel_id: self.context.channel_id(),
4384 htlc_id: htlc.htlc_id,
4385 payment_preimage: payment_preimage.clone(),
4392 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
4393 Some(msgs::UpdateFee {
4394 channel_id: self.context.channel_id(),
4395 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
4399 log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
4400 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
4401 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
4402 let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
4403 if self.context.signer_pending_commitment_update {
4404 log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
4405 self.context.signer_pending_commitment_update = false;
4409 #[cfg(not(async_signing))] {
4410 panic!("Failed to get signature for new commitment state");
4412 #[cfg(async_signing)] {
4413 if !self.context.signer_pending_commitment_update {
4414 log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
4415 self.context.signer_pending_commitment_update = true;
4420 Ok(msgs::CommitmentUpdate {
4421 update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
4426 /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
4427 pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
4428 if self.context.channel_state.is_local_shutdown_sent() {
4429 assert!(self.context.shutdown_scriptpubkey.is_some());
4430 Some(msgs::Shutdown {
4431 channel_id: self.context.channel_id,
4432 scriptpubkey: self.get_closing_scriptpubkey(),
4437 /// May panic if some calls other than message-handling calls (which will all Err immediately)
4438 /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
4440 /// Some links printed in log lines are included here to check them during build (when run with
4441 /// `cargo doc --document-private-items`):
4442 /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
4443 /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
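///
/// A sketch of invoking this handler on receipt of a `channel_reestablish` message
/// (illustrative only; arguments are assumed to be in scope):
/// ```ignore
/// let responses = channel.channel_reestablish(
///     &msg, &logger, &node_signer, chain_hash, &user_config, &best_block)?;
/// // Re-send `responses.channel_ready`, `responses.raa`, and
/// // `responses.commitment_update` (in `responses.order`) as applicable.
/// ```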
4444 pub fn channel_reestablish<L: Deref, NS: Deref>(
4445 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
4446 chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
4447 ) -> Result<ReestablishResponses, ChannelError>
4450 NS::Target: NodeSigner
4452 if !self.context.channel_state.is_peer_disconnected() {
4453 // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
4454 // almost certainly indicates we are going to end up out-of-sync in some way, so we
4455 // just close here instead of trying to recover.
4456 return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
4459 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
4460 msg.next_local_commitment_number == 0 {
4461 return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
4464 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
4465 if msg.next_remote_commitment_number > 0 {
4466 let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
4467 let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
4468 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
4469 if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
4470 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
4472 if msg.next_remote_commitment_number > our_commitment_transaction {
4473 macro_rules! log_and_panic {
4474 ($err_msg: expr) => {
4475 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4476 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4479 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
4480 This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
4481 More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
4482 If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
4483 ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
4484 ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
4485 Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
4486 See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
4490 // Before we change the state of the channel, we check if the peer is sending a very old
4491 // commitment transaction number; if so, we send a warning message.
4492 if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
4493 return Err(ChannelError::Warn(format!(
4494 "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
4495 msg.next_remote_commitment_number,
4496 our_commitment_transaction
4500 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
4501 // remaining cases either succeed or ErrorMessage-fail).
4502 self.context.channel_state.clear_peer_disconnected();
4503 self.context.sent_message_awaiting_response = None;
4505 let shutdown_msg = self.get_outbound_shutdown();
4507 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
4509 if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
4510 // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
4511 if !self.context.channel_state.is_our_channel_ready() ||
4512 self.context.channel_state.is_monitor_update_in_progress() {
4513 if msg.next_remote_commitment_number != 0 {
4514 return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
4516 // Short circuit the whole handler as there is nothing we can resend them
4517 return Ok(ReestablishResponses {
4518 channel_ready: None,
4519 raa: None, commitment_update: None,
4520 order: RAACommitmentOrder::CommitmentFirst,
4521 shutdown_msg, announcement_sigs,
4525 // We have OurChannelReady set!
4526 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4527 return Ok(ReestablishResponses {
4528 channel_ready: Some(msgs::ChannelReady {
4529 channel_id: self.context.channel_id(),
4530 next_per_commitment_point,
4531 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4533 raa: None, commitment_update: None,
4534 order: RAACommitmentOrder::CommitmentFirst,
4535 shutdown_msg, announcement_sigs,
4539 let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
4540 // Remote isn't waiting on any RevokeAndACK from us!
4541 // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
4543 } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
4544 if self.context.channel_state.is_monitor_update_in_progress() {
4545 self.context.monitor_pending_revoke_and_ack = true;
4548 Some(self.get_last_revoke_and_ack())
4551 debug_assert!(false, "All values should have been handled in the four cases above");
4552 return Err(ChannelError::Close(format!(
4553 "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
4554 msg.next_remote_commitment_number,
4555 our_commitment_transaction
4559 // We advance cur_counterparty_commitment_transaction_number only upon receipt of
4560 // revoke_and_ack, not on sending commitment_signed, so we add one if we have
4561 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
4562 // the corresponding revoke_and_ack back yet.
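// Illustrative example (assumed numbers): with INITIAL_COMMITMENT_NUMBER = 2^48 - 1
// and cur_counterparty_commitment_transaction_number = 2^48 - 4 (i.e. three
// revoke_and_acks received), and with AwaitingRemoteRevoke set, the counterparty's
// next commitment number is 3 + 1 = 4.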
4563 let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke();
4564 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
4565 self.mark_awaiting_response();
4567 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
4569 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
4570 // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
4571 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4572 Some(msgs::ChannelReady {
4573 channel_id: self.context.channel_id(),
4574 next_per_commitment_point,
4575 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4579 if msg.next_local_commitment_number == next_counterparty_commitment_number {
4580 if required_revoke.is_some() {
4581 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
4583 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
4586 Ok(ReestablishResponses {
4587 channel_ready, shutdown_msg, announcement_sigs,
4588 raa: required_revoke,
4589 commitment_update: None,
4590 order: self.context.resend_order.clone(),
4592 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
4593 if required_revoke.is_some() {
4594 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
4596 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
4599 if self.context.channel_state.is_monitor_update_in_progress() {
4600 self.context.monitor_pending_commitment_signed = true;
4601 Ok(ReestablishResponses {
4602 channel_ready, shutdown_msg, announcement_sigs,
4603 commitment_update: None, raa: None,
4604 order: self.context.resend_order.clone(),
4607 Ok(ReestablishResponses {
4608 channel_ready, shutdown_msg, announcement_sigs,
4609 raa: required_revoke,
4610 commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
4611 order: self.context.resend_order.clone(),
4614 } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
4615 Err(ChannelError::Close(format!(
4616 "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
4617 msg.next_local_commitment_number,
4618 next_counterparty_commitment_number,
4621 Err(ChannelError::Close(format!(
4622 "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
4623 msg.next_local_commitment_number,
4624 next_counterparty_commitment_number,
4629 /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
4630 /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
4631 /// at which point they will be recalculated.
4632 fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
4634 where F::Target: FeeEstimator
4636 if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
4638 // Propose a range from our current ChannelCloseMinimum feerate to our NonAnchorChannelFee
4639 // feerate plus our force_close_avoidance_max_fee_satoshis.
4640 // If we fail to come to consensus, we'll have to force-close.
4641 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
4642 // Use NonAnchorChannelFee because this should be an estimate for a channel close
4643 // that we don't expect to need fee bumping
4644 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
4645 let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
4647 // The spec requires that (when the channel does not have anchors) we only send absolute
4648 // channel fees no greater than the absolute channel fee on the current commitment
4649 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
4650 // a very good reason to apply such a limit in any case. We don't bother doing so, risking
4651 // some force-closure by old nodes, but we wanted to close the channel anyway.
4653 if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
4654 let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
4655 proposed_feerate = cmp::max(proposed_feerate, min_feerate);
4656 proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
4659 // Note that technically we could end up with a lower minimum fee if one side's balance is
4660 // below our dust limit, causing the output to disappear. We don't bother handling this
4661 // case, however, as this should only happen if a channel is closed before any (material)
4662 // payments have been made on it. This may cause slight fee overpayment and/or failure to
4663 // come to consensus with our counterparty on appropriate fees, however it should be a
4664 // relatively rare case. We can revisit this later, though note that in order to determine
4665 // if the funders' output is dust we have to know the absolute fee we're going to use.
4666 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
4667 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
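// E.g. (assumed values): a 1,000 sat/kW feerate on a 700-weight-unit closing
// transaction yields 1_000 * 700 / 1000 = 700 sats.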
4668 let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
4669 // We always add force_close_avoidance_max_fee_satoshis to our normal
4670 // feerate-calculated fee, but allow the max to be overridden if we're using a
4671 // target feerate-calculated fee.
4672 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
4673 proposed_max_feerate as u64 * tx_weight / 1000)
4675 self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
4678 self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
4679 self.context.closing_fee_limits.clone().unwrap()
4682 /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
4683 /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
4684 /// this point if we're the funder we should send the initial closing_signed, and in any case
4685 /// shutdown should complete within a reasonable timeframe.
4686 fn closing_negotiation_ready(&self) -> bool {
4687 self.context.closing_negotiation_ready()
4690 /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
4691 /// an Err if no progress is being made and the channel should be force-closed instead.
4692 /// Should be called on a one-minute timer.
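///
/// A sketch of driving this from a timer (illustrative; `channel` is assumed in scope):
/// ```ignore
/// // Called once per minute:
/// if let Err(e) = channel.timer_check_closing_negotiation_progress() {
///     // Negotiation stalled for two ticks; force-close the channel with `e`.
/// }
/// ```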
4693 pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
4694 if self.closing_negotiation_ready() {
4695 if self.context.closing_signed_in_flight {
4696 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
4698 self.context.closing_signed_in_flight = true;
4704 pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
4705 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4706 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4707 where F::Target: FeeEstimator, L::Target: Logger
4709 // If we're waiting on a monitor persistence, that implies we're also waiting to send some
4710 // message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
4711 // initiate `closing_signed` negotiation until we're clear of all pending messages. Note
4712 // that closing_negotiation_ready checks this case (as well as a few others).
4713 if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
4714 return Ok((None, None, None));
4717 if !self.context.is_outbound() {
4718 if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
4719 return self.closing_signed(fee_estimator, &msg);
4721 return Ok((None, None, None));
4724 // If we're waiting on a counterparty `commitment_signed` to clear some updates from our
4725 // local commitment transaction, we can't yet initiate `closing_signed` negotiation.
4726 if self.context.expecting_peer_commitment_signed {
4727 return Ok((None, None, None));
4730 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4732 assert!(self.context.shutdown_scriptpubkey.is_some());
4733 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
4734 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
4735 our_min_fee, our_max_fee, total_fee_satoshis);
4737 match &self.context.holder_signer {
4738 ChannelSignerType::Ecdsa(ecdsa) => {
4740 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4741 .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
4743 self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
4744 Ok((Some(msgs::ClosingSigned {
4745 channel_id: self.context.channel_id,
4746 fee_satoshis: total_fee_satoshis,
4748 fee_range: Some(msgs::ClosingSignedFeeRange {
4749 min_fee_satoshis: our_min_fee,
4750 max_fee_satoshis: our_max_fee,
4754 // TODO (taproot|arik)
4760 // Marks a channel as waiting for a response from the counterparty. If it's not received
4761 // within [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
4762 // a reconnection.
4763 fn mark_awaiting_response(&mut self) {
4764 self.context.sent_message_awaiting_response = Some(0);
4767 /// Determines whether we should disconnect the counterparty due to not receiving a response
4768 /// within our expected timeframe.
4770 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
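///
/// Illustrative usage (a sketch):
/// ```ignore
/// // Once per timer tick:
/// if channel.should_disconnect_peer_awaiting_response() {
///     // Disconnect the peer; reconnecting will trigger a channel_reestablish.
/// }
/// ```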
4771 pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
4772 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
4775 // Don't disconnect when we're not waiting on a response.
4778 *ticks_elapsed += 1;
4779 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
4782 pub fn shutdown(
4783 &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
4784 ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
4786 if self.context.channel_state.is_peer_disconnected() {
4787 return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
4789 if self.context.channel_state.is_pre_funded_state() {
4790 // Spec says we should fail the connection, not the channel, but that's nonsense, there
4791 // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
4792 // can do that via error message without getting a connection fail anyway...
4793 return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
4795 for htlc in self.context.pending_inbound_htlcs.iter() {
4796 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
4797 return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
4800 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
4802 if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
4803 return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
4806 if self.context.counterparty_shutdown_scriptpubkey.is_some() {
4807 if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
4808 return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
4811 self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
4814 // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
4815 // immediately after the commitment dance, but we can send a Shutdown because we won't send
4816 // any further commitment updates after we set LocalShutdownSent.
4817 let send_shutdown = !self.context.channel_state.is_local_shutdown_sent();
4819 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
4820 Some(_) => false,
4821 None => {
4822 assert!(send_shutdown);
4823 let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
4824 Ok(scriptpubkey) => scriptpubkey,
4825 Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
4826 };
4827 if !shutdown_scriptpubkey.is_compatible(their_features) {
4828 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
4829 }
4830 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
4831 true
4832 },
4833 };
4835 // From here on out, we may not fail!
4837 self.context.channel_state.set_remote_shutdown_sent();
4838 self.context.update_time_counter += 1;
4840 let monitor_update = if update_shutdown_script {
4841 self.context.latest_monitor_update_id += 1;
4842 let monitor_update = ChannelMonitorUpdate {
4843 update_id: self.context.latest_monitor_update_id,
4844 counterparty_node_id: Some(self.context.counterparty_node_id),
4845 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
4846 scriptpubkey: self.get_closing_scriptpubkey(),
4847 }],
4848 };
4849 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
4850 self.push_ret_blockable_mon_update(monitor_update)
4851 } else { None };
4852 let shutdown = if send_shutdown {
4853 Some(msgs::Shutdown {
4854 channel_id: self.context.channel_id,
4855 scriptpubkey: self.get_closing_scriptpubkey(),
4856 })
4857 } else { None };
4859 // We can't send our shutdown until we've committed all of our pending HTLCs, but the
4860 // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
4861 // cell HTLCs and return them to fail the payment.
4862 self.context.holding_cell_update_fee = None;
4863 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
4864 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
4865 match htlc_update {
4866 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
4867 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
4868 false
4869 },
4870 _ => true
4871 }
4872 });
4874 self.context.channel_state.set_local_shutdown_sent();
4875 self.context.update_time_counter += 1;
4877 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
4880 fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
4881 let mut tx = closing_tx.trust().built_transaction().clone();
4883 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
4885 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
4886 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
4887 let mut holder_sig = sig.serialize_der().to_vec();
4888 holder_sig.push(EcdsaSighashType::All as u8);
4889 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
4890 cp_sig.push(EcdsaSighashType::All as u8);
4891 if funding_key[..] < counterparty_funding_key[..] {
4892 tx.input[0].witness.push(holder_sig);
4893 tx.input[0].witness.push(holder_sig);
4894 } else {
4895 tx.input[0].witness.push(cp_sig);
4896 tx.input[0].witness.push(holder_sig);
4897 }
4899 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
4900 tx
4901 }
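// Note on the ordering above (illustrative, per BOLT 3): the 2-of-2 funding redeemscript
// sorts the two funding pubkeys lexicographically, so the signatures in the witness must
// appear in the same order. A sketch of the resulting witness stack:
//
//   [ <> (CHECKMULTISIG dummy), <sig for first-sorted key>, <sig for second-sorted key>, <redeemscript> ]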
4903 pub fn closing_signed<F: Deref>(
4904 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
4905 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4906 where F::Target: FeeEstimator
4908 if !self.context.channel_state.is_both_sides_shutdown() {
4909 return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
4911 if self.context.channel_state.is_peer_disconnected() {
4912 return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
4914 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
4915 return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
4917 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
4918 return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
4921 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
4922 return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
4925 if self.context.channel_state.is_monitor_update_in_progress() {
4926 self.context.pending_counterparty_closing_signed = Some(msg.clone());
4927 return Ok((None, None, None));
4930 let funding_redeemscript = self.context.get_funding_redeemscript();
4931 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
4932 if used_total_fee != msg.fee_satoshis {
4933 return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
4935 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4937 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
4938 Ok(_) => {},
4939 Err(_e) => {
4940 // The remote end may have decided to revoke their output due to inconsistent dust
4941 // limits, so check for that case by re-checking the signature here.
4942 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
4943 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4944 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
4948 for outp in closing_tx.trust().built_transaction().output.iter() {
4949 if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
4950 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
4954 assert!(self.context.shutdown_scriptpubkey.is_some());
4955 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
4956 if last_fee == msg.fee_satoshis {
4957 let shutdown_result = ShutdownResult {
4958 closure_reason: ClosureReason::CooperativeClosure,
4959 monitor_update: None,
4960 dropped_outbound_htlcs: Vec::new(),
4961 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4962 channel_id: self.context.channel_id,
4963 user_channel_id: self.context.user_id,
4964 channel_capacity_satoshis: self.context.channel_value_satoshis,
4965 counterparty_node_id: self.context.counterparty_node_id,
4966 unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
4967 };
4968 let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
4969 self.context.channel_state = ChannelState::ShutdownComplete;
4970 self.context.update_time_counter += 1;
4971 return Ok((None, Some(tx), Some(shutdown_result)));
4975 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4977 macro_rules! propose_fee {
4978 ($new_fee: expr) => {
4979 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
4980 (closing_tx, $new_fee)
4981 } else {
4982 self.build_closing_transaction($new_fee, false)
4983 };
4985 return match &self.context.holder_signer {
4986 ChannelSignerType::Ecdsa(ecdsa) => {
4987 let sig = ecdsa
4988 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4989 .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
4990 let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
4991 let shutdown_result = ShutdownResult {
4992 closure_reason: ClosureReason::CooperativeClosure,
4993 monitor_update: None,
4994 dropped_outbound_htlcs: Vec::new(),
4995 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4996 channel_id: self.context.channel_id,
4997 user_channel_id: self.context.user_id,
4998 channel_capacity_satoshis: self.context.channel_value_satoshis,
4999 counterparty_node_id: self.context.counterparty_node_id,
5000 unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
5001 };
5002 self.context.channel_state = ChannelState::ShutdownComplete;
5003 self.context.update_time_counter += 1;
5004 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
5005 (Some(tx), Some(shutdown_result))
5006 } else {
5007 (None, None)
5008 };
5010 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
5011 Ok((Some(msgs::ClosingSigned {
5012 channel_id: self.context.channel_id,
5013 fee_satoshis: used_fee,
5014 signature: sig,
5015 fee_range: Some(msgs::ClosingSignedFeeRange {
5016 min_fee_satoshis: our_min_fee,
5017 max_fee_satoshis: our_max_fee,
5019 }), signed_tx, shutdown_result))
5021 // TODO (taproot|arik)
5028 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
5029 if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
5030 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
5032 if max_fee_satoshis < our_min_fee {
5033 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
5035 if min_fee_satoshis > our_max_fee {
5036 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
5039 if !self.context.is_outbound() {
5040 // They have to pay, so pick the highest fee in the overlapping range.
5041 // We should never set an upper bound aside from their full balance
5042 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
5043 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
5044 } else {
5045 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
5046 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
5047 msg.fee_satoshis, our_min_fee, our_max_fee)));
5049 // The proposed fee is in our acceptable range, accept it and broadcast!
5050 propose_fee!(msg.fee_satoshis);
5051 }
5052 } else {
5053 // Old fee style negotiation. We don't bother to enforce whether they are complying
5054 // with the "making progress" requirements, we just comply and hope for the best.
5055 if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
5056 if msg.fee_satoshis > last_fee {
5057 if msg.fee_satoshis < our_max_fee {
5058 propose_fee!(msg.fee_satoshis);
5059 } else if last_fee < our_max_fee {
5060 propose_fee!(our_max_fee);
5061 } else {
5062 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
5063 }
5064 } else {
5065 if msg.fee_satoshis > our_min_fee {
5066 propose_fee!(msg.fee_satoshis);
5067 } else if last_fee > our_min_fee {
5068 propose_fee!(our_min_fee);
5069 } else {
5070 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
5071 }
5072 }
5073 } else {
5074 if msg.fee_satoshis < our_min_fee {
5075 propose_fee!(our_min_fee);
5076 } else if msg.fee_satoshis > our_max_fee {
5077 propose_fee!(our_max_fee);
5078 } else {
5079 propose_fee!(msg.fee_satoshis);
5080 }
5081 }
5082 }
5083 }
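// Worked example of the range-based negotiation above (illustrative numbers): suppose we
// are the closee (`!is_outbound()`), our computed range is 200-1_000 sat, and the funder
// proposes 300 sat with a range of 250-600 sat. Both range checks pass and we counter with
// `min(600, 1_000) = 600` sat, the highest fee in the overlap, since the funder (not us)
// pays it. If instead their max (say 150 sat) were below our min (200 sat), we'd only send
// a warning rather than force-close, leaving the channel open to retry negotiation later.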
5085 fn internal_htlc_satisfies_config(
5086 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
5087 ) -> Result<(), (&'static str, u16)> {
5088 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
5089 .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
5090 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
5091 (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
5092 return Err((
5093 "Prior hop has deviated from specified fee parameters or origin node has obsolete ones",
5094 0x1000 | 12, // fee_insufficient
5095 ));
5096 }
5097 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
5098 return Err((
5099 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
5100 0x1000 | 13, // incorrect_cltv_expiry
5101 ));
5102 }
5103 Ok(())
5104 }
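// Illustrative sketch of the fee check above (the helper below is hypothetical, not part
// of this file): the required fee is
// `forwarding_fee_base_msat + amt_to_forward * forwarding_fee_proportional_millionths / 1_000_000`.
// E.g. with a 1_000 msat base fee and 100 ppm, forwarding 2_000_000 msat onward requires
// the incoming HTLC to carry at least 2_000_000 + 1_000 + 200 = 2_001_200 msat:
//
// fn min_incoming_amt_msat(amt_to_forward: u64, base_msat: u32, prop_millionths: u32) -> Option<u64> {
//     let prop_fee = amt_to_forward.checked_mul(prop_millionths as u64)? / 1_000_000;
//     amt_to_forward.checked_add(prop_fee)?.checked_add(base_msat as u64)
// }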
5106 /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
5107 /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
5108 /// unsuccessful, falls back to the previous one if one exists.
5109 pub fn htlc_satisfies_config(
5110 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
5111 ) -> Result<(), (&'static str, u16)> {
5112 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
5113 .or_else(|err| {
5114 if let Some(prev_config) = self.context.prev_config() {
5115 self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
5116 } else {
5117 Err(err)
5118 }
5119 })
5120 }
5122 pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
5123 self.context.cur_holder_commitment_transaction_number + 1
5126 pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
5127 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 }
5130 pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
5131 self.context.cur_counterparty_commitment_transaction_number + 2
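// Illustrative note (not from the source): commitment numbers count *down* from
// INITIAL_COMMITMENT_NUMBER (2^48 - 1), and the internal `cur_*` counters already point
// one past the protocol-visible values, hence the `+ 1`/`+ 2` offsets in the getters
// above. E.g. if `cur_counterparty_commitment_transaction_number` is N, then N + 1 is
// their current (unrevoked) commitment and N + 2 is the last one they revoked.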
5135 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
5136 &self.context.holder_signer
5140 pub fn get_value_stat(&self) -> ChannelValueStat {
5141 ChannelValueStat {
5142 value_to_self_msat: self.context.value_to_self_msat,
5143 channel_value_msat: self.context.channel_value_satoshis * 1000,
5144 channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
5145 pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
5146 pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
5147 holding_cell_outbound_amount_msat: {
5148 let mut res = 0;
5149 for h in self.context.holding_cell_htlc_updates.iter() {
5150 match h {
5151 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
5152 res += amount_msat;
5153 },
5154 _ => {}
5155 }
5156 }
5157 res
5158 },
5159 counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
5160 counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
5164 /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
5165 /// Allowed in any state (including after shutdown)
5166 pub fn is_awaiting_monitor_update(&self) -> bool {
5167 self.context.channel_state.is_monitor_update_in_progress()
5170 /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
5171 pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
5172 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
5173 self.context.blocked_monitor_updates[0].update.update_id - 1
5176 /// Returns the next blocked monitor update, if one exists, and a bool which indicates a
5177 /// further blocked monitor update exists after the next.
5178 pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
5179 if self.context.blocked_monitor_updates.is_empty() { return None; }
5180 Some((self.context.blocked_monitor_updates.remove(0).update,
5181 !self.context.blocked_monitor_updates.is_empty()))
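// Illustrative example (hypothetical values): if `blocked_monitor_updates` currently holds
// updates with `update_id`s [7, 8], then `get_latest_unblocked_monitor_update_id` returns
// 6 (everything strictly before the first blocked update has been released), and each
// `unblock_next_blocked_monitor_update` call pops 7 then 8, with the returned bool
// reporting whether further blocked updates remain.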
5184 /// Pushes a new monitor update into our monitor update queue, returning it if it should be
5185 /// immediately given to the user for persisting or `None` if it should be held as blocked.
5186 fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
5187 -> Option<ChannelMonitorUpdate> {
5188 let release_monitor = self.context.blocked_monitor_updates.is_empty();
5189 if !release_monitor {
5190 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
5191 update,
5192 });
5193 None
5194 } else {
5195 Some(update)
5196 }
5197 }
5199 pub fn blocked_monitor_updates_pending(&self) -> usize {
5200 self.context.blocked_monitor_updates.len()
5203 /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
5204 /// If the channel is outbound, this implies we have not yet broadcasted the funding
5205 /// transaction. If the channel is inbound, this implies simply that the channel has not
5206 /// advanced state.
5207 pub fn is_awaiting_initial_mon_persist(&self) -> bool {
5208 if !self.is_awaiting_monitor_update() { return false; }
5209 if matches!(
5210 self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
5211 if flags.clone().clear(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty()
5212 ) {
5213 // If we're not a 0conf channel, we'll be waiting on a monitor update with only
5214 // AwaitingChannelReady set, though our peer could have sent their channel_ready.
5215 debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
5216 return true;
5217 }
5218 if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
5219 self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
5220 // If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
5221 // waiting for the initial monitor persistence. Thus, we check if our commitment
5222 // transaction numbers have both been iterated only exactly once (for the
5223 // funding_signed), and we're awaiting monitor update.
5225 // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
5226 // only way to get an awaiting-monitor-update state during initial funding is if the
5227 // initial monitor persistence is still pending).
5229 // Because deciding we're awaiting initial broadcast spuriously could result in
5230 // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
5231 // we hard-assert here, even in production builds.
5232 if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
5233 assert!(self.context.monitor_pending_channel_ready);
5234 assert_eq!(self.context.latest_monitor_update_id, 0);
5235 return true;
5236 }
5237 false
5238 }
5240 /// Returns true if our channel_ready has been sent
5241 pub fn is_our_channel_ready(&self) -> bool {
5242 matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) ||
5243 matches!(self.context.channel_state, ChannelState::ChannelReady(_))
5246 /// Returns true if our peer has either initiated or agreed to shut down the channel.
5247 pub fn received_shutdown(&self) -> bool {
5248 self.context.channel_state.is_remote_shutdown_sent()
5251 /// Returns true if we either initiated or agreed to shut down the channel.
5252 pub fn sent_shutdown(&self) -> bool {
5253 self.context.channel_state.is_local_shutdown_sent()
5256 /// Returns true if this channel is fully shut down. True here implies that no further actions
5257 /// may/will be taken on this channel, and thus this object should be freed. Any future changes
5258 /// will be handled appropriately by the chain monitor.
5259 pub fn is_shutdown(&self) -> bool {
5260 matches!(self.context.channel_state, ChannelState::ShutdownComplete)
5263 pub fn channel_update_status(&self) -> ChannelUpdateStatus {
5264 self.context.channel_update_status
5267 pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
5268 self.context.update_time_counter += 1;
5269 self.context.channel_update_status = status;
5272 fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
5273 // Called:
5274 // * always when a new block/transactions are confirmed with the new height
5275 // * when funding is signed with a height of 0
5276 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
5277 return None;
5278 }
5280 let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5281 if funding_tx_confirmations <= 0 {
5282 self.context.funding_tx_confirmation_height = 0;
5283 }
5285 if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
5286 return None;
5287 }
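// Illustrative arithmetic for the check above: confirmations are counted inclusively, so
// a funding transaction confirmed at height 100 has `105 - 100 + 1 = 6` confirmations
// once the chain reaches height 105, satisfying a `minimum_depth` of 6.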
5289 // If we're still pending the signature on a funding transaction, then we're not ready to send a
5290 // channel_ready yet.
5291 if self.context.signer_pending_funding {
5292 return None;
5293 }
5295 // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
5296 // channel_ready until the entire batch is ready.
5297 let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()).is_empty()) {
5298 self.context.channel_state.set_our_channel_ready();
5299 true
5300 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
5301 self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
5302 self.context.update_time_counter += 1;
5303 true
5304 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
5305 // We got a reorg but not enough to trigger a force close, just ignore.
5306 false
5307 } else {
5308 if self.context.funding_tx_confirmation_height != 0 &&
5309 self.context.channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new())
5311 // We should never see a funding transaction on-chain until we've received
5312 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
5313 // an inbound channel - before that we have no known funding TXID). The fuzzer,
5314 // however, may do this and we shouldn't treat it as a bug.
5315 #[cfg(not(fuzzing))]
5316 panic!("Started confirming a channel in a state pre-AwaitingChannelReady: {}.\n\
5317 Do NOT broadcast a funding transaction manually - let LDK do it for you!",
5318 self.context.channel_state.to_u32());
5319 }
5320 // We got a reorg but not enough to trigger a force close, just ignore.
5321 false
5322 };
5324 if need_commitment_update {
5325 if !self.context.channel_state.is_monitor_update_in_progress() {
5326 if !self.context.channel_state.is_peer_disconnected() {
5327 let next_per_commitment_point =
5328 self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
5329 return Some(msgs::ChannelReady {
5330 channel_id: self.context.channel_id,
5331 next_per_commitment_point,
5332 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5333 });
5334 }
5335 } else {
5336 self.context.monitor_pending_channel_ready = true;
5337 }
5338 }
5340 None
5341 }
5342 /// When a transaction is confirmed, we check whether it is, or spends, the funding transaction.
5343 /// In the first case, we store the confirmation height and calculate the short channel id.
5344 /// In the second, we simply return an Err indicating we need to be force-closed now.
5345 pub fn transactions_confirmed<NS: Deref, L: Deref>(
5346 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
5347 chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
5348 ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5349 where
5350 NS::Target: NodeSigner,
5351 L::Target: Logger
5352 {
5353 let mut msgs = (None, None);
5354 if let Some(funding_txo) = self.context.get_funding_txo() {
5355 for &(index_in_block, tx) in txdata.iter() {
5356 // Check if the transaction is the expected funding transaction, and if it is,
5357 // check that it pays the right amount to the right script.
5358 if self.context.funding_tx_confirmation_height == 0 {
5359 if tx.txid() == funding_txo.txid {
5360 let txo_idx = funding_txo.index as usize;
5361 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
5362 tx.output[txo_idx].value != self.context.channel_value_satoshis {
5363 if self.context.is_outbound() {
5364 // If we generated the funding transaction and it doesn't match what it
5365 // should, the client is really broken and we should just panic and
5366 // tell them off. That said, because hash collisions happen with high
5367 // probability in fuzzing mode, if we're fuzzing we just close the
5368 // channel and move on.
5369 #[cfg(not(fuzzing))]
5370 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5372 self.context.update_time_counter += 1;
5373 let err_reason = "funding tx had wrong script/value or output index";
5374 return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
5376 if self.context.is_outbound() {
5377 if !tx.is_coin_base() {
5378 for input in tx.input.iter() {
5379 if input.witness.is_empty() {
5380 // We generated a malleable funding transaction, implying we've
5381 // just exposed ourselves to funds loss to our counterparty.
5382 #[cfg(not(fuzzing))]
5383 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5388 self.context.funding_tx_confirmation_height = height;
5389 self.context.funding_tx_confirmed_in = Some(*block_hash);
5390 self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
5391 Ok(scid) => Some(scid),
5392 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
5393 };
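// Illustrative note (not from the source): a short channel id packs the funding output's
// on-chain position into a u64 as three fields, which is what `scid_from_parts` enforces
// bounds for (hence the panic message above):
//
//   scid = (block_height << 40) | (tx_index_in_block << 16) | output_index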
5395 // If this is a coinbase transaction and not a 0-conf channel,
5396 // we should update our min_depth to 100 to handle coinbase maturity.
5397 if tx.is_coin_base() &&
5398 self.context.minimum_depth.unwrap_or(0) > 0 &&
5399 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
5400 self.context.minimum_depth = Some(COINBASE_MATURITY);
5403 // If we allow 1-conf funding, we may need to check for channel_ready here and
5404 // send it immediately instead of waiting for a best_block_updated call (which
5405 // may have already happened for this block).
5406 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5407 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5408 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
5409 msgs = (Some(channel_ready), announcement_sigs);
5412 for inp in tx.input.iter() {
5413 if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
5414 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
5415 return Err(ClosureReason::CommitmentTxConfirmed);
5423 /// When a new block is connected, we check the height of the block against outbound holding
5424 /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
5425 /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
5426 /// handled by the ChannelMonitor.
5428 /// If we return Err, the channel may have been closed, at which point the standard
5429 /// requirements apply - no calls may be made except those explicitly stated to be allowed
5430 /// there.
5432 /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
5433 /// back.
5434 pub fn best_block_updated<NS: Deref, L: Deref>(
5435 &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
5436 node_signer: &NS, user_config: &UserConfig, logger: &L
5437 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5438 where
5439 NS::Target: NodeSigner,
5440 L::Target: Logger
5441 {
5442 self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
5445 fn do_best_block_updated<NS: Deref, L: Deref>(
5446 &mut self, height: u32, highest_header_time: u32,
5447 chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
5448 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5449 where
5450 NS::Target: NodeSigner,
5451 L::Target: Logger
5452 {
5453 let mut timed_out_htlcs = Vec::new();
5454 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
5455 // forward an HTLC when our counterparty should almost certainly just fail it for expiring
5456 // ~now.
5457 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
5458 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5459 match htlc_update {
5460 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
5461 if *cltv_expiry <= unforwarded_htlc_cltv_limit {
5462 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
5463 false
5464 } else { true }
5465 },
5466 _ => true
5467 }
5468 });
5470 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
5472 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5473 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5474 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5475 } else { None };
5476 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5477 return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
5480 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
5481 self.context.channel_state.is_our_channel_ready() {
5482 let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5483 if self.context.funding_tx_confirmation_height == 0 {
5484 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
5485 // zero if it has been reorged out, however in either case, our state flags
5486 // indicate we've already sent a channel_ready
5487 funding_tx_confirmations = 0;
5490 // If we've sent channel_ready (or have both sent and received channel_ready), and
5491 // the funding transaction has become unconfirmed,
5492 // close the channel and hope we can get the latest state on chain (because presumably
5493 // the funding transaction is at least still in the mempool of most nodes).
5495 // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
5496 // 0-conf channel, but not doing so may lead to the
5497 // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
5498 // to.
5499 if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
5500 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
5501 self.context.minimum_depth.unwrap(), funding_tx_confirmations);
5502 return Err(ClosureReason::ProcessingError { err: err_reason });
5504 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
5505 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
5506 log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
5507 // If funding_tx_confirmed_in is unset, the channel must not be active
5508 assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new()));
5509 assert!(!self.context.channel_state.is_our_channel_ready());
5510 return Err(ClosureReason::FundingTimedOut);
5513 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5514 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5515 } else { None };
5516 Ok((None, timed_out_htlcs, announcement_sigs))
5517 }
5519 /// Indicates the funding transaction is no longer confirmed in the main chain. This may
5520 /// force-close the channel, but may also indicate a harmless reorganization of a block or two
5521 /// before the channel has reached channel_ready and we can just wait for more blocks.
5522 pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
5523 if self.context.funding_tx_confirmation_height != 0 {
5524 // We handle the funding disconnection by calling best_block_updated with a height one
5525 // below where our funding was connected, implying a reorg back to conf_height - 1.
5526 let reorg_height = self.context.funding_tx_confirmation_height - 1;
5527 // We use the time field to bump the current time we set on channel updates if its
5528 // larger. If we don't know that time has moved forward, we can just set it to the last
5529 // time we saw and it will be ignored.
5530 let best_time = self.context.update_time_counter;
5531 match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) {
5532 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
5533 assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
5534 assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
5535 assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
5536 Ok(())
5537 },
5538 Err(e) => Err(e)
5539 }
5540 } else {
5541 // We never learned about the funding confirmation anyway, just ignore
5542 Ok(())
5543 }
5544 }
5546 // Methods to get unprompted messages to send to the remote end (or where we already returned
5547 // something in the handler for the message that prompted this message):
5549 /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
5550 /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
5551 /// directions). Should be used for both broadcasted announcements and in response to an
5552 /// AnnouncementSignatures message from the remote peer.
5554 /// Will only fail if we're not in a state where channel_announcement may be sent (including
5555 /// closing).
5557 /// This will only return ChannelError::Ignore upon failure.
5559 /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
5560 fn get_channel_announcement<NS: Deref>(
5561 &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5562 ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5563 if !self.context.config.announced_channel {
5564 return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
5566 if !self.context.is_usable() {
5567 return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
5570 let short_channel_id = self.context.get_short_channel_id()
5571 .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
5572 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5573 .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
5574 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
5575 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
5577 let msg = msgs::UnsignedChannelAnnouncement {
5578 features: channelmanager::provided_channel_features(&user_config),
5579 chain_hash,
5580 short_channel_id,
5581 node_id_1: if were_node_one { node_id } else { counterparty_node_id },
5582 node_id_2: if were_node_one { counterparty_node_id } else { node_id },
5583 bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
5584 bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
5585 excess_data: Vec::new(),
5586 };
5588 Ok(msg)
5589 }
5591 fn get_announcement_sigs<NS: Deref, L: Deref>(
5592 &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5593 best_block_height: u32, logger: &L
5594 ) -> Option<msgs::AnnouncementSignatures>
5595 where
5596 NS::Target: NodeSigner,
5597 L::Target: Logger
5598 {
5599 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5600 return None;
5601 }
5603 if !self.context.is_usable() {
5604 return None;
5605 }
5607 if self.context.channel_state.is_peer_disconnected() {
5608 log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
5609 return None;
5610 }
5612 if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
5613 return None;
5614 }
5616 log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
5617 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5618 Ok(a) => a,
5619 Err(e) => {
5620 log_trace!(logger, "{:?}", e);
5621 return None;
5622 },
5623 };
5624 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
5625 Err(_) => {
5626 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
5627 return None;
5628 },
5629 Ok(v) => v
5630 };
5631 match &self.context.holder_signer {
5632 ChannelSignerType::Ecdsa(ecdsa) => {
5633 let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
5634 Err(_) => {
5635 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
5636 return None;
5637 },
5638 Ok(v) => v
5639 };
5640 let short_channel_id = match self.context.get_short_channel_id() {
5641 Some(scid) => scid,
5642 None => return None,
5643 };
5645 self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
5647 Some(msgs::AnnouncementSignatures {
5648 channel_id: self.context.channel_id(),
5649 short_channel_id,
5650 node_signature: our_node_sig,
5651 bitcoin_signature: our_bitcoin_sig,
5654 // TODO (taproot|arik)
5660 /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
5661 /// available.
5662 fn sign_channel_announcement<NS: Deref>(
5663 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
5664 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5665 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
5666 let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5667 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
5668 let were_node_one = announcement.node_id_1 == our_node_key;
5670 let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
5671 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
5672 match &self.context.holder_signer {
5673 ChannelSignerType::Ecdsa(ecdsa) => {
5674 let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
5675 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
5676 Ok(msgs::ChannelAnnouncement {
5677 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
5678 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
5679 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
5680 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
5681 contents: announcement,
5684 // TODO (taproot|arik)
5685 #[cfg(taproot)]
5686 _ => todo!()
5687 }
5688 } else {
5689 Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
5690 }
5691 }
5693 /// Processes an incoming announcement_signatures message, providing a fully-signed
5694 /// channel_announcement message which we can broadcast and storing our counterparty's
5695 /// signatures for later reconstruction/rebroadcast of the channel_announcement.
5696 pub fn announcement_signatures<NS: Deref>(
5697 &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
5698 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
5699 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5700 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
5702 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
5704 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
5705 return Err(ChannelError::Close(format!(
5706 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
5707 &announcement, self.context.get_counterparty_node_id())));
5709 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
5710 return Err(ChannelError::Close(format!(
5711 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
5712 &announcement, self.context.counterparty_funding_pubkey())));
5715 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
5716 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5717 return Err(ChannelError::Ignore(
5718 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
5721 self.sign_channel_announcement(node_signer, announcement)
5724 /// Gets a signed channel_announcement for this channel, if we previously received an
5725 /// announcement_signatures from our counterparty.
5726 pub fn get_signed_channel_announcement<NS: Deref>(
5727 &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
5728 ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
5729 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5730 return None;
5731 }
5732 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5733 Ok(a) => a,
5734 Err(_) => return None,
5735 };
5736 match self.sign_channel_announcement(node_signer, announcement) {
5737 Ok(res) => Some(res),
5738 Err(_) => None,
5739 }
5740 }
5742 /// May panic if called on a channel that wasn't immediately-previously
5743 /// self.remove_uncommitted_htlcs_and_mark_paused()'d
5744 pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
5745 assert!(self.context.channel_state.is_peer_disconnected());
5746 assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
5747 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
5748 // current to_remote balances. However, it no longer has any use, and thus is now simply
5749 // set to a dummy (but valid, as required by the spec) public key.
5750 // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
5751 // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
5752 // valid, and valid in fuzzing mode's arbitrary validity criteria:
5753 let mut pk = [2; 33]; pk[1] = 0xff;
5754 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
5755 let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
5756 let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
5757 log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
5758 remote_last_secret
5759 } else {
5760 log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
5761 [0; 32]
5762 };
5763 self.mark_awaiting_response();
5764 msgs::ChannelReestablish {
5765 channel_id: self.context.channel_id(),
5766 // The protocol has two different commitment number concepts - the "commitment
5767 // transaction number", which starts from 0 and counts up, and the "revocation key
5768 // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
5769 // commitment transaction numbers by the index which will be used to reveal the
5770 // revocation key for that commitment transaction, which means we have to convert them
5771 // to protocol-level commitment numbers here...
5773 // next_local_commitment_number is the next commitment_signed number we expect to
5774 // receive (indicating if they need to resend one that we missed).
5775 next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
5776 // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
5777 // receive, however we track it by the next commitment number for a remote transaction
5778 // (which is one further, as they always revoke previous commitment transaction, not
5779 // the one we send) so we have to decrement by 1. Note that if
5780 // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
5781 // dropped this channel on disconnect as it hasn't yet reached AwaitingChannelReady so we can't
5782 // underflow here.
5783 next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
5784 your_last_per_commitment_secret: remote_last_secret,
5785 my_current_per_commitment_point: dummy_pubkey,
5786 // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
5787 // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
5788 // txid of that interactive transaction, else we MUST NOT set it.
5789 next_funding_txid: None,
5790 }
5791 }
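// Worked example of the conversions above (illustrative): immediately after exchanging
// funding_signed, `cur_holder_commitment_transaction_number` is INITIAL_COMMITMENT_NUMBER - 1,
// so `next_local_commitment_number` is 1 (we've seen commitment number 0 signed and expect
// number 1 next), while `next_remote_commitment_number` is 0 until our counterparty has
// revoked their initial commitment.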
5794 // Send stuff to our remote peers:
5796 /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
5797 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
5798 /// commitment update.
5800 /// `Err`s will only be [`ChannelError::Ignore`].
5801 pub fn queue_add_htlc<F: Deref, L: Deref>(
5802 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5803 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5804 blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5805 ) -> Result<(), ChannelError>
5806 where F::Target: FeeEstimator, L::Target: Logger
5807 {
5808 self
5809 .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
5810 skimmed_fee_msat, blinding_point, fee_estimator, logger)
5811 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
5812 .map_err(|err| {
5813 if let ChannelError::Ignore(_) = err { /* fine */ }
5814 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
5815 err
5816 })
5817 }
5819 /// Adds a pending outbound HTLC to this channel; note that you probably want
5820 /// [`Self::send_htlc_and_commit`] instead, because you'll want both messages at once.
5822 /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
5823 /// the wire:
5824 /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
5825 ///   wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
5826 ///   in flight.
5827 /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
5828 /// we may not yet have sent the previous commitment update messages and will need to
5829 /// regenerate them.
5831 /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
5832 /// on this [`Channel`] if `force_holding_cell` is false.
5834 /// `Err`s will only be [`ChannelError::Ignore`].
5835 fn send_htlc<F: Deref, L: Deref>(
5836 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5837 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
5838 skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
5839 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5840 ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
5841 where F::Target: FeeEstimator, L::Target: Logger
5843 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
5844 self.context.channel_state.is_local_shutdown_sent() ||
5845 self.context.channel_state.is_remote_shutdown_sent()
5847 return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
5849 let channel_total_msat = self.context.channel_value_satoshis * 1000;
5850 if amount_msat > channel_total_msat {
5851 return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
5854 if amount_msat == 0 {
5855 return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
5858 let available_balances = self.context.get_available_balances(fee_estimator);
5859 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
5860 return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
5861 available_balances.next_outbound_htlc_minimum_msat)));
5864 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
5865 return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
5866 available_balances.next_outbound_htlc_limit_msat)));
5867 }
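// Illustrative example (hypothetical numbers): `next_outbound_htlc_limit_msat` is
// typically well below our raw balance because it accounts for the counterparty's reserve
// and the commitment-fee buffer. E.g. with 100_000 sat to ourselves, a 10_000 sat reserve,
// and ~2_000 sat budgeted for fees, a 95_000_000 msat send would be rejected here even
// though it is less than our nominal balance.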
5869 if self.context.channel_state.is_peer_disconnected() {
5870 // Note that this should never really happen: receiving an incoming HTLC for relay
5871 // while !is_live() will result in us rejecting the HTLC, and we won't allow the user
5872 // to send directly into a !is_live() channel. However, if we
5873 // disconnected during the time the previous hop was doing the commitment dance we may
5874 // end up getting here after the forwarding delay. In any case, returning an
5875 // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
5876 return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
5879 let need_holding_cell = !self.context.channel_state.can_generate_new_commitment();
5880 log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
5881 payment_hash, amount_msat,
5882 if force_holding_cell { "into holding cell" }
5883 else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
5884 else { "to peer" });
5886 if need_holding_cell {
5887 force_holding_cell = true;
5890 // Now update local state:
5891 if force_holding_cell {
5892 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
5893 amount_msat,
5894 payment_hash,
5895 cltv_expiry,
5896 source,
5897 onion_routing_packet,
5898 skimmed_fee_msat,
5899 blinding_point,
5900 });
5901 return Ok(None);
5902 }
5904 self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
5905 htlc_id: self.context.next_holder_htlc_id,
5906 amount_msat,
5907 payment_hash: payment_hash.clone(),
5908 cltv_expiry,
5909 state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
5910 source,
5911 blinding_point,
5912 skimmed_fee_msat,
5913 });
5915 let res = msgs::UpdateAddHTLC {
5916 channel_id: self.context.channel_id,
5917 htlc_id: self.context.next_holder_htlc_id,
5918 amount_msat,
5919 payment_hash,
5920 cltv_expiry,
5921 onion_routing_packet,
5922 skimmed_fee_msat,
5923 blinding_point,
5924 };
5925 self.context.next_holder_htlc_id += 1;
5927 Ok(Some(res))
5928 }
5930 fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
5931 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
5932 // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
5933 // fail to generate this, we still are at least at a position where upgrading their status
5934 // is acceptable.
5935 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
5936 let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
5937 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
5938 } else { None };
5939 if let Some(state) = new_state {
5940 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
5941 htlc.state = state;
5942 }
5943 }
5944 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
5945 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
5946 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
5947 // Grab the preimage, if it exists, instead of cloning
5948 let mut reason = OutboundHTLCOutcome::Success(None);
5949 mem::swap(outcome, &mut reason);
5950 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
5953 if let Some((feerate, update_state)) = self.context.pending_update_fee {
5954 if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
5955 debug_assert!(!self.context.is_outbound());
5956 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
5957 self.context.feerate_per_kw = feerate;
5958 self.context.pending_update_fee = None;
5961 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
5963 let (mut htlcs_ref, counterparty_commitment_tx) =
5964 self.build_commitment_no_state_update(logger);
5965 let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
5966 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
5967 htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
5969 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
5970 self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
5973 self.context.latest_monitor_update_id += 1;
5974 let monitor_update = ChannelMonitorUpdate {
5975 update_id: self.context.latest_monitor_update_id,
5976 counterparty_node_id: Some(self.context.counterparty_node_id),
5977 updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
5978 commitment_txid: counterparty_commitment_txid,
5979 htlc_outputs: htlcs.clone(),
5980 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
5981 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
5982 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
5983 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
5984 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
5985 }],
5986 };
5987 self.context.channel_state.set_awaiting_remote_revoke();
5988 monitor_update
5989 }
5991 fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
5992 -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
5993 where L::Target: Logger
5995 let counterparty_keys = self.context.build_remote_transaction_keys();
5996 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5997 let counterparty_commitment_tx = commitment_stats.tx;
5999 #[cfg(any(test, fuzzing))]
6001 if !self.context.is_outbound() {
6002 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
6003 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
6004 if let Some(info) = projected_commit_tx_info {
6005 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
6006 if info.total_pending_htlcs == total_pending_htlcs
6007 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
6008 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
6009 && info.feerate == self.context.feerate_per_kw {
6010 let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
6011 assert_eq!(actual_fee, info.fee);
6017 (commitment_stats.htlcs_included, counterparty_commitment_tx)
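// For intuition, `commit_tx_fee_msat` is a weight-based computation. A simplified
// sketch, assuming the classic pre-anchors weights (724 WU base, 172 WU per
// non-dust HTLC) instead of deriving them from the channel type:
//
//     fn sketch_commit_tx_fee_msat(feerate_per_kw: u32, num_nondust_htlcs: u64) -> u64 {
//         let weight = 724 + 172 * num_nondust_htlcs;
//         // sat/kWU * WU / 1000 = sats; * 1000 = msats
//         feerate_per_kw as u64 * weight / 1000 * 1000
//     }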
6020 /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
6021 /// generation when we shouldn't change HTLC/channel state.
6022 fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
6023 // Re-run the fee-consistency checks from `build_commitment_no_state_update` (test/fuzzing only).
6024 #[cfg(any(test, fuzzing))]
6025 self.build_commitment_no_state_update(logger);
6027 let counterparty_keys = self.context.build_remote_transaction_keys();
6028 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
6029 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
6031 match &self.context.holder_signer {
6032 ChannelSignerType::Ecdsa(ecdsa) => {
6033 let (signature, htlc_signatures);
6036 let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
6037 for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
htlcs.push(htlc);
}
6041 let res = ecdsa.sign_counterparty_commitment(
6042 &commitment_stats.tx,
6043 commitment_stats.inbound_htlc_preimages,
6044 commitment_stats.outbound_htlc_preimages,
6045 &self.context.secp_ctx,
6046 ).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
signature = res.0;
6048 htlc_signatures = res.1;
6050 log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
6051 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
6052 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
6053 log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
6055 for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
6056 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
6057 encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
6058 encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
6059 log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
6060 log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
6064 Ok((msgs::CommitmentSigned {
6065 channel_id: self.context.channel_id,
signature,
htlc_signatures,
#[cfg(taproot)]
6069 partial_signature_with_nonce: None,
6070 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
6072 // TODO (taproot|arik)
6078 /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
6079 /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
6081 /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
6082 /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
6083 pub fn send_htlc_and_commit<F: Deref, L: Deref>(
6084 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
6085 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
6086 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
6087 ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
6088 where F::Target: FeeEstimator, L::Target: Logger
6090 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
6091 onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
6092 if let Err(e) = &send_res {
debug_assert!(matches!(e, ChannelError::Ignore(_)), "Sending cannot trigger channel failure");
}
6095 let monitor_update = self.build_commitment_no_status_check(logger);
6096 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
6097 Ok(self.push_ret_blockable_mon_update(monitor_update))
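// Call-site sketch (the channel, fee estimator, logger, and HTLC parameters here
// are assumed to already exist; the values are placeholders):
//
//     let mon_update_opt = chan.send_htlc_and_commit(
//         10_000, payment_hash, cltv_expiry, htlc_source, onion_packet,
//         None /* skimmed_fee_msat */, &fee_estimator, &&logger,
//     )?;
//     // Any returned `ChannelMonitorUpdate` must be persisted before the HTLC
//     // is considered irrevocably committed.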
6103 /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually occurred.
6105 pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
6106 let new_forwarding_info = Some(CounterpartyForwardingInfo {
6107 fee_base_msat: msg.contents.fee_base_msat,
6108 fee_proportional_millionths: msg.contents.fee_proportional_millionths,
6109 cltv_expiry_delta: msg.contents.cltv_expiry_delta
6111 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
6113 self.context.counterparty_forwarding_info = new_forwarding_info;
Ok(did_change)
6119 /// Begins the shutdown process, getting a message for the remote peer and returning all
6120 /// holding cell HTLCs for payment failure.
6121 pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
6122 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
6123 -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
6125 for htlc in self.context.pending_outbound_htlcs.iter() {
6126 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
6127 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
6130 if self.context.channel_state.is_local_shutdown_sent() {
6131 return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
6133 else if self.context.channel_state.is_remote_shutdown_sent() {
6134 return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
6136 if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
6137 return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
6139 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
6140 if self.context.channel_state.is_peer_disconnected() || self.context.channel_state.is_monitor_update_in_progress() {
6141 return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
6144 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
6147 // use override shutdown script if provided
6148 let shutdown_scriptpubkey = match override_shutdown_script {
6149 Some(script) => script,
6151 // otherwise, use the shutdown scriptpubkey provided by the signer
6152 match signer_provider.get_shutdown_scriptpubkey() {
6153 Ok(scriptpubkey) => scriptpubkey,
6154 Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
6158 if !shutdown_scriptpubkey.is_compatible(their_features) {
6159 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6161 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
6166 // From here on out, we may not fail!
6167 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
6168 self.context.channel_state.set_local_shutdown_sent();
6169 self.context.update_time_counter += 1;
6171 let monitor_update = if update_shutdown_script {
6172 self.context.latest_monitor_update_id += 1;
6173 let monitor_update = ChannelMonitorUpdate {
6174 update_id: self.context.latest_monitor_update_id,
6175 counterparty_node_id: Some(self.context.counterparty_node_id),
6176 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
6177 scriptpubkey: self.get_closing_scriptpubkey(),
6180 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
6181 self.push_ret_blockable_mon_update(monitor_update)
6183 let shutdown = msgs::Shutdown {
6184 channel_id: self.context.channel_id,
6185 scriptpubkey: self.get_closing_scriptpubkey(),
6188 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
6189 // our shutdown until we've committed all of the pending changes.
6190 self.context.holding_cell_update_fee = None;
6191 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
6192 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
6194 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
6195 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
6202 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
6203 "we can't both complete shutdown and return a monitor update");
6205 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
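// Call-site sketch (assumes a funded channel with no pending outbound HTLCs):
//
//     let (shutdown_msg, mon_update_opt, failed_htlcs) =
//         chan.get_shutdown(&signer_provider, &peer_init_features, None, None)?;
//     // `shutdown_msg` goes to the peer, the monitor update (if any) to persistence,
//     // and each (HTLCSource, PaymentHash) pair should be failed back upstream.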
6208 pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
6209 self.context.holding_cell_htlc_updates.iter()
6210 .flat_map(|htlc_update| {
6212 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
6213 => Some((source, payment_hash)),
6217 .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
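// e.g., counting everything currently in flight (holding cell plus committed):
//
//     let in_flight = chan.inflight_htlc_sources().count();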
6221 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
6222 pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
6223 pub context: ChannelContext<SP>,
6224 pub unfunded_context: UnfundedChannelContext,
6227 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
6228 pub fn new<ES: Deref, F: Deref>(
6229 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
6230 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
6231 outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
6232 ) -> Result<OutboundV1Channel<SP>, APIError>
6233 where ES::Target: EntropySource,
6234 F::Target: FeeEstimator
6236 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
6237 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
6238 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
6239 let pubkeys = holder_signer.pubkeys().clone();
6241 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
6242 return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
6244 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6245 return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
6247 let channel_value_msat = channel_value_satoshis * 1000;
6248 if push_msat > channel_value_msat {
6249 return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
6251 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
6252 return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk", holder_selected_contest_delay)});
6254 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
6255 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6256 // Protocol level safety check in place, although it should never happen because
6257 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
6258 return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
6261 let channel_type = Self::get_initial_channel_type(&config, their_features);
6262 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
6264 let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6265 (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
} else {
6267 (ConfirmationTarget::NonAnchorChannelFee, 0)
};
6269 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
6271 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
6272 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
6273 if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
6274 return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay the initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
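// Worked example with illustrative numbers: on a non-anchors channel at
// 2_500 sat/kW, assuming MIN_AFFORDABLE_HTLC_COUNT = 4 budgeted non-dust HTLCs,
// commit_tx_fee_msat = 2_500 * (724 + 4 * 172) / 1000 * 1000 = 3_530_000 msat,
// so the funder must retain at least ~3_530 sats after `push_msat`.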
6277 let mut secp_ctx = Secp256k1::new();
6278 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6280 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6281 match signer_provider.get_shutdown_scriptpubkey() {
6282 Ok(scriptpubkey) => Some(scriptpubkey),
6283 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
6287 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6288 if !shutdown_scriptpubkey.is_compatible(&their_features) {
6289 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6293 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
6294 Ok(script) => script,
6295 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
6298 let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
6301 context: ChannelContext {
6304 config: LegacyChannelConfig {
6305 options: config.channel_config.clone(),
6306 announced_channel: config.channel_handshake_config.announced_channel,
6307 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6312 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
6314 channel_id: temporary_channel_id,
6315 temporary_channel_id: Some(temporary_channel_id),
6316 channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
6317 announcement_sigs_state: AnnouncementSigsState::NotSent,
6319 channel_value_satoshis,
6321 latest_monitor_update_id: 0,
6323 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6324 shutdown_scriptpubkey,
6327 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6328 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6331 pending_inbound_htlcs: Vec::new(),
6332 pending_outbound_htlcs: Vec::new(),
6333 holding_cell_htlc_updates: Vec::new(),
6334 pending_update_fee: None,
6335 holding_cell_update_fee: None,
6336 next_holder_htlc_id: 0,
6337 next_counterparty_htlc_id: 0,
6338 update_time_counter: 1,
6340 resend_order: RAACommitmentOrder::CommitmentFirst,
6342 monitor_pending_channel_ready: false,
6343 monitor_pending_revoke_and_ack: false,
6344 monitor_pending_commitment_signed: false,
6345 monitor_pending_forwards: Vec::new(),
6346 monitor_pending_failures: Vec::new(),
6347 monitor_pending_finalized_fulfills: Vec::new(),
6349 signer_pending_commitment_update: false,
6350 signer_pending_funding: false,
6352 #[cfg(debug_assertions)]
6353 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6354 #[cfg(debug_assertions)]
6355 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6357 last_sent_closing_fee: None,
6358 pending_counterparty_closing_signed: None,
6359 expecting_peer_commitment_signed: false,
6360 closing_fee_limits: None,
6361 target_closing_feerate_sats_per_kw: None,
6363 funding_tx_confirmed_in: None,
6364 funding_tx_confirmation_height: 0,
6365 short_channel_id: None,
6366 channel_creation_height: current_chain_height,
6368 feerate_per_kw: commitment_feerate,
6369 counterparty_dust_limit_satoshis: 0,
6370 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6371 counterparty_max_htlc_value_in_flight_msat: 0,
6372 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
6373 counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
6374 holder_selected_channel_reserve_satoshis,
6375 counterparty_htlc_minimum_msat: 0,
6376 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6377 counterparty_max_accepted_htlcs: 0,
6378 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6379 minimum_depth: None, // Filled in in accept_channel
6381 counterparty_forwarding_info: None,
6383 channel_transaction_parameters: ChannelTransactionParameters {
6384 holder_pubkeys: pubkeys,
6385 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6386 is_outbound_from_holder: true,
6387 counterparty_parameters: None,
6388 funding_outpoint: None,
6389 channel_type_features: channel_type.clone()
6391 funding_transaction: None,
6392 is_batch_funding: None,
6394 counterparty_cur_commitment_point: None,
6395 counterparty_prev_commitment_point: None,
6396 counterparty_node_id,
6398 counterparty_shutdown_scriptpubkey: None,
6400 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6402 channel_update_status: ChannelUpdateStatus::Enabled,
6403 closing_signed_in_flight: false,
6405 announcement_sigs: None,
6407 #[cfg(any(test, fuzzing))]
6408 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6409 #[cfg(any(test, fuzzing))]
6410 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6412 workaround_lnd_bug_4006: None,
6413 sent_message_awaiting_response: None,
6415 latest_inbound_scid_alias: None,
6416 outbound_scid_alias,
6418 channel_pending_event_emitted: false,
6419 channel_ready_event_emitted: false,
6421 #[cfg(any(test, fuzzing))]
6422 historical_inbound_htlc_fulfills: HashSet::new(),
6427 blocked_monitor_updates: Vec::new(),
6429 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
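// Construction sketch (all inputs are placeholders for values the caller,
// typically the channel manager, already has):
//
//     let chan = OutboundV1Channel::new(
//         &fee_estimator, &entropy_source, &signer_provider, counterparty_node_id,
//         &their_init_features, 1_000_000 /* sats */, 0 /* push_msat */,
//         42 /* user_id */, &config, best_block_height, outbound_scid_alias, None,
//     )?;
//     let open_msg = chan.get_open_channel(chain_hash);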
6433 /// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
6434 fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
6435 let counterparty_keys = self.context.build_remote_transaction_keys();
6436 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
6437 let signature = match &self.context.holder_signer {
6438 // TODO (taproot|arik): move match into calling method for Taproot
6439 ChannelSignerType::Ecdsa(ecdsa) => {
6440 ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
6441 .map(|(sig, _)| sig).ok()?
6443 // TODO (taproot|arik)
6448 if self.context.signer_pending_funding {
6449 log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
6450 self.context.signer_pending_funding = false;
6453 Some(msgs::FundingCreated {
6454 temporary_channel_id: self.context.temporary_channel_id.unwrap(),
6455 funding_txid: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
6456 funding_output_index: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
signature,
#[cfg(taproot)]
6459 partial_signature_with_nonce: None,
#[cfg(taproot)]
6461 next_local_nonce: None,
6465 /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
6466 /// a funding_created message for the remote peer.
6467 /// Panics if called at some time other than immediately after initial handshake, if called twice,
6468 /// or if called on an inbound channel.
6469 /// Note that channel_id changes during this call!
6470 /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
6471 /// If an Err is returned, it is a ChannelError::Close.
6472 pub fn get_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
6473 -> Result<Option<msgs::FundingCreated>, (Self, ChannelError)> where L::Target: Logger {
6474 if !self.context.is_outbound() {
6475 panic!("Tried to create outbound funding_created message on an inbound channel!");
if !matches!(
6478 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
6479 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
) {
6481 panic!("Tried to get a funding_created message at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
}
6483 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6484 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6485 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6486 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6489 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6490 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
6492 // Now that we're past error-generating stuff, update our local state:
6494 self.context.channel_state = ChannelState::FundingNegotiated;
6495 self.context.channel_id = funding_txo.to_channel_id();
6497 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
6498 // We can skip this if it is a zero-conf channel.
6499 if funding_transaction.is_coin_base() &&
6500 self.context.minimum_depth.unwrap_or(0) > 0 &&
6501 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
6502 self.context.minimum_depth = Some(COINBASE_MATURITY);
6505 self.context.funding_transaction = Some(funding_transaction);
6506 self.context.is_batch_funding = is_batch_funding.then_some(());
6508 let funding_created = self.get_funding_created_msg(logger);
6509 if funding_created.is_none() {
6510 #[cfg(not(async_signing))] {
6511 panic!("Failed to get signature for new funding creation");
6513 #[cfg(async_signing)] {
6514 if !self.context.signer_pending_funding {
6515 log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
6516 self.context.signer_pending_funding = true;
6524 fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
6525 // The default channel type (ie the first one we try) depends on whether the channel is
6526 // public - if it is, we just go with `only_static_remotekey` as it's the only option
6527 // available. If it's private, we first try `scid_privacy` as it provides better privacy
6528 // with no other changes, and fall back to `only_static_remotekey`.
6529 let mut ret = ChannelTypeFeatures::only_static_remote_key();
6530 if !config.channel_handshake_config.announced_channel &&
6531 config.channel_handshake_config.negotiate_scid_privacy &&
6532 their_features.supports_scid_privacy() {
6533 ret.set_scid_privacy_required();
6536 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
6537 // set it now. If they don't understand it, we'll fall back to our default of
6538 // `only_static_remotekey`.
6539 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
6540 their_features.supports_anchors_zero_fee_htlc_tx() {
6541 ret.set_anchors_zero_fee_htlc_tx_required();
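// Resulting type for a private channel with both knobs enabled, mirroring the
// branches above (sketch only):
//
//     let mut ty = ChannelTypeFeatures::only_static_remote_key();
//     ty.set_scid_privacy_required();             // private + negotiate_scid_privacy
//     ty.set_anchors_zero_fee_htlc_tx_required(); // negotiate_anchors_zero_fee_htlc_tx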
6547 /// If we receive an error message, it may only be a rejection of the channel type we tried,
6548 /// not of our ability to open any channel at all. Thus, on error, we should first call this
6549 /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
6550 pub(crate) fn maybe_handle_error_without_close<F: Deref>(
6551 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
6552 ) -> Result<msgs::OpenChannel, ()>
6554 F::Target: FeeEstimator
6556 if !self.context.is_outbound() ||
!matches!(
6558 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
6559 if flags == NegotiatingFundingFlags::OUR_INIT_SENT
)
{
return Err(());
}
6564 if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
6565 // We've exhausted our options
return Err(());
}
6568 // We support opening a few different types of channels. Try removing our additional
6569 // features one by one until we've either arrived at our default or the counterparty has accepted one.
6572 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
6573 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
6574 // checks whether the counterparty supports every feature, this would only happen if the
6575 // counterparty is advertising the feature, but rejecting channels proposing the feature for some reason.
6577 if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
6578 self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
6579 self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
6580 assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
6581 } else if self.context.channel_type.supports_scid_privacy() {
6582 self.context.channel_type.clear_scid_privacy();
} else {
6584 self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
}
6586 self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
6587 Ok(self.get_open_channel(chain_hash))
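// The downgrade ladder is anchors_zero_fee_htlc_tx -> scid_privacy ->
// static_remote_key. A caller-side sketch (transport glue is hypothetical):
//
//     if let Ok(open_msg) = chan.maybe_handle_error_without_close(chain_hash, &fee_estimator) {
//         peer.send_message(open_msg); // retry with the downgraded channel type
//     } else {
//         // nothing left to strip; fail the channel
//     }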
6590 pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
6591 if !self.context.is_outbound() {
6592 panic!("Tried to open a channel for an inbound channel?");
6594 if self.context.have_received_message() {
6595 panic!("Cannot generate an open_channel after we've moved forward");
6598 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6599 panic!("Tried to send an open_channel for a channel that has already advanced");
6602 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6603 let keys = self.context.get_holder_pubkeys();
6607 temporary_channel_id: self.context.channel_id,
6608 funding_satoshis: self.context.channel_value_satoshis,
6609 push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
6610 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6611 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6612 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6613 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6614 feerate_per_kw: self.context.feerate_per_kw as u32,
6615 to_self_delay: self.context.get_holder_selected_contest_delay(),
6616 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6617 funding_pubkey: keys.funding_pubkey,
6618 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
6619 payment_point: keys.payment_point,
6620 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
6621 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
6622 first_per_commitment_point,
6623 channel_flags: if self.context.config.announced_channel {1} else {0},
6624 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6625 Some(script) => script.clone().into_inner(),
6626 None => Builder::new().into_script(),
6628 channel_type: Some(self.context.channel_type.clone()),
6633 pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
6634 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
6636 // Check sanity of message fields:
6637 if !self.context.is_outbound() {
6638 return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
6640 if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
6641 return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
6643 if msg.dust_limit_satoshis > 21000000 * 100000000 {
6644 return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
6646 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
6647 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
6649 if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
6650 return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
6652 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
6653 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
6654 msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
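// Equivalently: both reserves must fit inside the channel together, i.e.
// msg.channel_reserve_satoshis + holder_selected_channel_reserve_satoshis
//     <= channel_value_satoshis.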
6656 let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
6657 if msg.htlc_minimum_msat >= full_channel_value_msat {
6658 return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6660 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6661 if msg.to_self_delay > max_delay_acceptable {
6662 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
6664 if msg.max_accepted_htlcs < 1 {
6665 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6667 if msg.max_accepted_htlcs > MAX_HTLCS {
6668 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6671 // Now check against optional parameters as set by config...
6672 if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
6673 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
6675 if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
6676 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
6678 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
6679 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
6681 if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
6682 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
6684 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6685 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6687 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6688 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6690 if msg.minimum_depth > peer_limits.max_minimum_depth {
6691 return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
6694 if let Some(ty) = &msg.channel_type {
6695 if *ty != self.context.channel_type {
6696 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
6698 } else if their_features.supports_channel_type() {
6699 // Assume they've accepted the channel type as they said they understand it.
} else {
6701 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6702 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6703 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6705 self.context.channel_type = channel_type.clone();
6706 self.context.channel_transaction_parameters.channel_type_features = channel_type;
6709 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6710 match &msg.shutdown_scriptpubkey {
6711 &Some(ref script) => {
6712 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything.
6713 if script.len() == 0 {
None
} else {
6716 if !script::is_bolt2_compliant(&script, their_features) {
6717 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
6719 Some(script.clone())
6722 // Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (i.e. a 0-length script). Peer looks buggy, so we fail the channel.
6724 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
6729 self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
6730 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
6731 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
6732 self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
6733 self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
6735 if peer_limits.trust_own_funding_0conf {
6736 self.context.minimum_depth = Some(msg.minimum_depth);
} else {
6738 self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
}
6741 let counterparty_pubkeys = ChannelPublicKeys {
6742 funding_pubkey: msg.funding_pubkey,
6743 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6744 payment_point: msg.payment_point,
6745 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6746 htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6749 self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
6750 selected_contest_delay: msg.to_self_delay,
6751 pubkeys: counterparty_pubkeys,
6754 self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
6755 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
6757 self.context.channel_state = ChannelState::NegotiatingFunding(
6758 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
6760 self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
6765 /// Handles a funding_signed message from the remote end.
6766 /// If this call is successful, broadcast the funding transaction (and not before!)
6767 pub fn funding_signed<L: Deref>(
6768 mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
6769 ) -> Result<(Channel<SP>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (OutboundV1Channel<SP>, ChannelError)>
6773 if !self.context.is_outbound() {
6774 return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
6776 if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
6777 return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
6779 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6780 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6781 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6782 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6785 let funding_script = self.context.get_funding_redeemscript();
6787 let counterparty_keys = self.context.build_remote_transaction_keys();
6788 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
6789 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
6790 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
6792 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
6793 &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
6795 let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
6796 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
6798 let trusted_tx = initial_commitment_tx.trust();
6799 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
6800 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
6801 // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
6802 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
6803 return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));
6807 let holder_commitment_tx = HolderCommitmentTransaction::new(
6808 initial_commitment_tx,
6811 &self.context.get_holder_pubkeys().funding_pubkey,
6812 self.context.counterparty_funding_pubkey()
6816 let validated = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
6817 if validated.is_err() {
6818 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
6821 let funding_redeemscript = self.context.get_funding_redeemscript();
6822 let funding_txo = self.context.get_funding_txo().unwrap();
6823 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
6824 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
6825 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
6826 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
6827 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
6828 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
6829 shutdown_script, self.context.get_holder_selected_contest_delay(),
6830 &self.context.destination_script, (funding_txo, funding_txo_script),
6831 &self.context.channel_transaction_parameters,
6832 funding_redeemscript.clone(), self.context.channel_value_satoshis,
6834 holder_commitment_tx, best_block, self.context.counterparty_node_id);
6835 channel_monitor.provide_initial_counterparty_commitment_tx(
6836 counterparty_initial_bitcoin_tx.txid, Vec::new(),
6837 self.context.cur_counterparty_commitment_transaction_number,
6838 self.context.counterparty_cur_commitment_point.unwrap(),
6839 counterparty_initial_commitment_tx.feerate_per_kw(),
6840 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
6841 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
6843 assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have not had any monitors yet to fail an update!
6844 if self.context.is_batch_funding() {
6845 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
6847 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
6849 self.context.cur_holder_commitment_transaction_number -= 1;
6850 self.context.cur_counterparty_commitment_transaction_number -= 1;
6852 log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
6854 let mut channel = Channel { context: self.context };
6856 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
6857 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
6858 Ok((channel, channel_monitor))
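// Call-site sketch (hypothetical glue; on success the monitor must be persisted
// before the funding transaction is broadcast):
//
//     match chan.funding_signed(&msg, best_block, &signer_provider, &&logger) {
//         Ok((funded_chan, monitor)) => {
//             watch.watch_channel(funding_txo, monitor); // persist first!
//             broadcaster.broadcast_transactions(&[&funding_tx]);
//         }
//         Err((still_unfunded, err)) => { /* handle `err`, channel stays unfunded */ }
//     }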
6861 /// Indicates that the signer may have some signatures for us, so we should retry if we're stuck.
6863 #[cfg(async_signing)]
6864 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
6865 if self.context.signer_pending_funding && self.context.is_outbound() {
6866 log_trace!(logger, "Signer unblocked a funding_created");
6867 self.get_funding_created_msg(logger)
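// Driven from the async-signing notification path; a sketch (the transport here
// is hypothetical):
//
//     if let Some(funding_created) = chan.signer_maybe_unblocked(&&logger) {
//         peer.send_message(funding_created);
//     }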
6872 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
6873 pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
6874 pub context: ChannelContext<SP>,
6875 pub unfunded_context: UnfundedChannelContext,
6878 /// Fetches the [`ChannelTypeFeatures`] that will be used for a channel built from a given
6879 /// [`msgs::OpenChannel`].
6880 pub(super) fn channel_type_from_open_channel(
6881 msg: &msgs::OpenChannel, their_features: &InitFeatures,
6882 our_supported_features: &ChannelTypeFeatures
6883 ) -> Result<ChannelTypeFeatures, ChannelError> {
6884 if let Some(channel_type) = &msg.channel_type {
6885 if channel_type.supports_any_optional_bits() {
6886 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
6889 // We only support the channel types defined by the `ChannelManager` in
6890 // `provided_channel_type_features`. The channel type must always support
6891 // `static_remote_key`.
6892 if !channel_type.requires_static_remote_key() {
6893 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
6895 // Make sure we support all of the features behind the channel type.
6896 if !channel_type.is_subset(our_supported_features) {
6897 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
6899 let announced_channel = (msg.channel_flags & 1) == 1;
6900 if channel_type.requires_scid_privacy() && announced_channel {
6901 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
6903 Ok(channel_type.clone())
6905 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6906 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6907 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
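// Usage sketch: the resolved type always requires `static_remote_key`, whether it
// came from an explicit `channel_type` or was implied by the peer's `InitFeatures`:
//
//     let ty = channel_type_from_open_channel(&open_msg, &their_features, &our_features)?;
//     debug_assert!(ty.requires_static_remote_key());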
6913 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
6914 /// Creates a new channel from a remote side's request for one.
6915 /// Assumes chain_hash has already been checked and corresponds with what we expect!
6916 pub fn new<ES: Deref, F: Deref, L: Deref>(
6917 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
6918 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
6919 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
6920 current_chain_height: u32, logger: &L, is_0conf: bool,
6921 ) -> Result<InboundV1Channel<SP>, ChannelError>
6922 where ES::Target: EntropySource,
6923 F::Target: FeeEstimator,
6926 let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.temporary_channel_id));
6927 let announced_channel = (msg.channel_flags & 1) == 1;
6929 // First check the channel type is known, failing before we do anything else if we don't
6930 // support this channel type.
6931 let channel_type = channel_type_from_open_channel(msg, their_features, our_supported_features)?;
6933 let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
6934 let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
6935 let pubkeys = holder_signer.pubkeys().clone();
6936 let counterparty_pubkeys = ChannelPublicKeys {
6937 funding_pubkey: msg.funding_pubkey,
6938 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6939 payment_point: msg.payment_point,
6940 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6941 htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6944 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
6945 return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
6948 // Check sanity of message fields:
6949 if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
6950 return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
6952 if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6953 return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
6955 if msg.channel_reserve_satoshis > msg.funding_satoshis {
6956 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
6958 let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
6959 if msg.push_msat > full_channel_value_msat {
6960 return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
6962 if msg.dust_limit_satoshis > msg.funding_satoshis {
6963 return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
6965 if msg.htlc_minimum_msat >= full_channel_value_msat {
6966 return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6968 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, &&logger)?;
6970 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6971 if msg.to_self_delay > max_counterparty_selected_contest_delay {
6972 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
6974 if msg.max_accepted_htlcs < 1 {
6975 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6977 if msg.max_accepted_htlcs > MAX_HTLCS {
6978 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6981 // Now check against optional parameters as set by config...
6982 if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
6983 return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
6985 if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
6986 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
6988 if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
6989 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
6991 if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
6992 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
6994 if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
6995 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
6997 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6998 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
7000 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
7001 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
7004 // Convert things into internal flags and prep our state:
7006 if config.channel_handshake_limits.force_announced_channel_preference {
7007 if config.channel_handshake_config.announced_channel != announced_channel {
7008 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
7012 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
7013 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
7014 // Protocol level safety check in place, although it should never happen because
7015 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
7016 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
7018 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
7019 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
7021 if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
7022 log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
7023 msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
7025 if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
7026 return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
7029 // check if the funder's amount for the initial commitment tx is sufficient
7030 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
7031 let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
7032 ANCHOR_OUTPUT_VALUE_SATOSHI * 2
7036 let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
7037 let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
7038 if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
7039 return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay the initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
7042 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
7043 // While it's reasonable for us to not meet the channel reserve initially (if they don't
7044 // want to push much to us), our counterparty should always have more than our reserve.
7045 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
7046 return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
7049 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
7050 match &msg.shutdown_scriptpubkey {
7051 &Some(ref script) => {
7052 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything.
7053 if script.len() == 0 {
None
} else {
7056 if !script::is_bolt2_compliant(&script, their_features) {
7057 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
7059 Some(script.clone())
7062 // Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (i.e. a 0-length script). Peer looks buggy, so we fail the channel.
7064 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
7069 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
7070 match signer_provider.get_shutdown_scriptpubkey() {
7071 Ok(scriptpubkey) => Some(scriptpubkey),
7072 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
7076 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
7077 if !shutdown_scriptpubkey.is_compatible(&their_features) {
7078 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
7082 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
7083 Ok(script) => script,
7084 Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
7087 let mut secp_ctx = Secp256k1::new();
7088 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
7090 let minimum_depth = if is_0conf {
7093 Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
		let chan = Self {
			context: ChannelContext {
				user_id,

				config: LegacyChannelConfig {
					options: config.channel_config.clone(),
					announced_channel: config.channel_handshake_config.announced_channel,
					commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
				},

				prev_config: None,

				inbound_handshake_limits_override: None,

				temporary_channel_id: Some(msg.temporary_channel_id),
				channel_id: msg.temporary_channel_id,
				channel_state: ChannelState::NegotiatingFunding(
					NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
				),
				announcement_sigs_state: AnnouncementSigsState::NotSent,
				secp_ctx,

				latest_monitor_update_id: 0,

				holder_signer: ChannelSignerType::Ecdsa(holder_signer),
				shutdown_scriptpubkey,
				destination_script,

				cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
				cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
				value_to_self_msat: msg.push_msat,

				pending_inbound_htlcs: Vec::new(),
				pending_outbound_htlcs: Vec::new(),
				holding_cell_htlc_updates: Vec::new(),
				pending_update_fee: None,
				holding_cell_update_fee: None,
				next_holder_htlc_id: 0,
				next_counterparty_htlc_id: 0,
				update_time_counter: 1,

				resend_order: RAACommitmentOrder::CommitmentFirst,

				monitor_pending_channel_ready: false,
				monitor_pending_revoke_and_ack: false,
				monitor_pending_commitment_signed: false,
				monitor_pending_forwards: Vec::new(),
				monitor_pending_failures: Vec::new(),
				monitor_pending_finalized_fulfills: Vec::new(),

				signer_pending_commitment_update: false,
				signer_pending_funding: false,

				#[cfg(debug_assertions)]
				holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
				#[cfg(debug_assertions)]
				counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),

				last_sent_closing_fee: None,
				pending_counterparty_closing_signed: None,
				expecting_peer_commitment_signed: false,
				closing_fee_limits: None,
				target_closing_feerate_sats_per_kw: None,

				funding_tx_confirmed_in: None,
				funding_tx_confirmation_height: 0,
				short_channel_id: None,
				channel_creation_height: current_chain_height,

				feerate_per_kw: msg.feerate_per_kw,
				channel_value_satoshis: msg.funding_satoshis,
				counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
				holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
				counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
				holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
				counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
				holder_selected_channel_reserve_satoshis,
				counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
				holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
				counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
				holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
				minimum_depth,

				counterparty_forwarding_info: None,

				channel_transaction_parameters: ChannelTransactionParameters {
					holder_pubkeys: pubkeys,
					holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
					is_outbound_from_holder: false,
					counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
						selected_contest_delay: msg.to_self_delay,
						pubkeys: counterparty_pubkeys,
					}),
					funding_outpoint: None,
					channel_type_features: channel_type.clone()
				},
				funding_transaction: None,
				is_batch_funding: None,

				counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
				counterparty_prev_commitment_point: None,
				counterparty_node_id,

				counterparty_shutdown_scriptpubkey,

				commitment_secrets: CounterpartyCommitmentSecrets::new(),

				channel_update_status: ChannelUpdateStatus::Enabled,
				closing_signed_in_flight: false,

				announcement_sigs: None,

				#[cfg(any(test, fuzzing))]
				next_local_commitment_tx_fee_info_cached: Mutex::new(None),
				#[cfg(any(test, fuzzing))]
				next_remote_commitment_tx_fee_info_cached: Mutex::new(None),

				workaround_lnd_bug_4006: None,
				sent_message_awaiting_response: None,

				latest_inbound_scid_alias: None,
				outbound_scid_alias: 0,

				channel_pending_event_emitted: false,
				channel_ready_event_emitted: false,

				#[cfg(any(test, fuzzing))]
				historical_inbound_htlc_fulfills: HashSet::new(),

				channel_type,
				channel_keys_id,

				blocked_monitor_updates: Vec::new(),
			},
			unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
		};

		Ok(chan)
	}
	/// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
	/// should be sent back to the counterparty node.
	///
	/// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
	pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
		if self.context.is_outbound() {
			panic!("Tried to send accept_channel for an outbound channel?");
		}
		if !matches!(
			self.context.channel_state, ChannelState::NegotiatingFunding(flags)
			if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
		) {
			panic!("Tried to send accept_channel after channel had moved forward");
		}
		if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Tried to send an accept_channel for a channel that has already advanced");
		}

		self.generate_accept_channel_message()
	}
	/// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
	/// inbound channel. If the intention is to accept an inbound channel, use
	/// [`InboundV1Channel::accept_inbound_channel`] instead.
	///
	/// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
	fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
		let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
		let keys = self.context.get_holder_pubkeys();

		msgs::AcceptChannel {
			temporary_channel_id: self.context.channel_id,
			dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
			max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
			channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
			htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
			minimum_depth: self.context.minimum_depth.unwrap(),
			to_self_delay: self.context.get_holder_selected_contest_delay(),
			max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
			funding_pubkey: keys.funding_pubkey,
			revocation_basepoint: keys.revocation_basepoint.to_public_key(),
			payment_point: keys.payment_point,
			delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
			htlc_basepoint: keys.htlc_basepoint.to_public_key(),
			first_per_commitment_point,
			shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
				Some(script) => script.clone().into_inner(),
				None => Builder::new().into_script(),
			}),
			channel_type: Some(self.context.channel_type.clone()),
			#[cfg(taproot)]
			next_local_nonce: None,
		}
	}
	/// Allows tests to extract a [`msgs::AcceptChannel`] message for an inbound channel without
	/// accepting it.
	///
	/// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
	#[cfg(test)]
	pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
		self.generate_accept_channel_message()
	}
	fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
		let funding_script = self.context.get_funding_redeemscript();

		let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
		let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
		let trusted_tx = initial_commitment_tx.trust();
		let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
		let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
		// They sign the holder commitment transaction...
		log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
			log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
			encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
			encode::serialize_hex(&funding_script), &self.context.channel_id());
		secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
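		// On verification failure, `secp_check!` short-circuits out of this function with a
		// `ChannelError::Close` carrying the message above, rather than panicking.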
		Ok(initial_commitment_tx)
	}
	pub fn funding_created<L: Deref>(
		mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
	) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
	where
		L::Target: Logger
	{
		if self.context.is_outbound() {
			return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
		}
		if !matches!(
			self.context.channel_state, ChannelState::NegotiatingFunding(flags)
			if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
		) {
			// BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
			// remember the channel, so it's safe to just send an error_message here and drop the
			// channel.
			return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
		}
		if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
				self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
				self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
		}

		let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
		self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
		// This is an externally observable change before we finish all our checks. In particular,
		// check_funding_created_signature may fail.
		self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
		let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
			Ok(res) => res,
			Err(ChannelError::Close(e)) => {
				self.context.channel_transaction_parameters.funding_outpoint = None;
				return Err((self, ChannelError::Close(e)));
			},
			Err(e) => {
				// The only error we know how to handle is ChannelError::Close, so we fall over here
				// to make sure we don't continue with an inconsistent state.
				panic!("unexpected error type from check_funding_created_signature {:?}", e);
			}
		};
		let holder_commitment_tx = HolderCommitmentTransaction::new(
			initial_commitment_tx,
			msg.signature,
			Vec::new(),
			&self.context.get_holder_pubkeys().funding_pubkey,
			self.context.counterparty_funding_pubkey()
		);

		if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
			return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
		}
		// Now that we're past error-generating stuff, update our local state:

		self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
		self.context.channel_id = funding_txo.to_channel_id();
		self.context.cur_counterparty_commitment_transaction_number -= 1;
		self.context.cur_holder_commitment_transaction_number -= 1;

		let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);

		let funding_redeemscript = self.context.get_funding_redeemscript();
		let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
		let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
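		// Per BOLT 3, the low 48 bits of each commitment number are XORed with this factor
		// (derived from both sides' payment basepoints) before being embedded in the
		// commitment transaction's locktime and sequence fields, so chain observers can't
		// trivially count how many updates a channel has made.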
		let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
		let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
		monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
		let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
			shutdown_script, self.context.get_holder_selected_contest_delay(),
			&self.context.destination_script, (funding_txo, funding_txo_script.clone()),
			&self.context.channel_transaction_parameters,
			funding_redeemscript.clone(), self.context.channel_value_satoshis,
			obscure_factor,
			holder_commitment_tx, best_block, self.context.counterparty_node_id);
		channel_monitor.provide_initial_counterparty_commitment_tx(
			counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
			self.context.cur_counterparty_commitment_transaction_number + 1,
			self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
			counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
			counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);

		log_info!(logger, "{} funding_signed for peer for channel {}",
			if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
		// Promote the channel to a full-fledged one now that we have updated the state and have a
		// `ChannelMonitor`.
		let mut channel = Channel {
			context: self.context,
		};
		let need_channel_ready = channel.check_get_channel_ready(0).is_some();
		channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
		Ok((channel, funding_signed, channel_monitor))
	}
}
const SERIALIZATION_VERSION: u8 = 3;
const MIN_SERIALIZATION_VERSION: u8 = 3;

impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
	(0, FailRelay),
	(1, FailMalformed),
	(2, Fulfill),
);
impl Writeable for ChannelUpdateStatus {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		// We only care about writing out the current state as it was announced, i.e. only either
		// Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
		// channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
		match self {
			ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
			ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
			ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
			ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
		}
		Ok(())
	}
}
impl Readable for ChannelUpdateStatus {
	fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
		Ok(match <u8 as Readable>::read(reader)? {
			0 => ChannelUpdateStatus::Enabled,
			1 => ChannelUpdateStatus::Disabled,
			_ => return Err(DecodeError::InvalidValue),
		})
	}
}
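// Net effect of the two impls above: the staged states collapse to whichever state was
// last announced, so a freshly deserialized channel always resumes as either Enabled or
// Disabled, never in a *Staged intermediate.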
impl Writeable for AnnouncementSigsState {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		// We only care about writing out the current state as if we had just disconnected, at
		// which point we always set anything but AnnouncementSigsReceived to NotSent.
		match self {
			AnnouncementSigsState::NotSent => 0u8.write(writer),
			AnnouncementSigsState::MessageSent => 0u8.write(writer),
			AnnouncementSigsState::Committed => 0u8.write(writer),
			AnnouncementSigsState::PeerReceived => 1u8.write(writer),
		}
	}
}
impl Readable for AnnouncementSigsState {
	fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
		Ok(match <u8 as Readable>::read(reader)? {
			0 => AnnouncementSigsState::NotSent,
			1 => AnnouncementSigsState::PeerReceived,
			_ => return Err(DecodeError::InvalidValue),
		})
	}
}
7474 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
7475 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7476 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
7479 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
7481 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7482 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
7483 // the low bytes now and the optional high bytes later.
7484 let user_id_low = self.context.user_id as u64;
7485 user_id_low.write(writer)?;
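		// Illustrative example: if user_id == ((hi as u128) << 64) | (lo as u128), we write
		// `lo` here in the legacy fixed position and `hi` below as odd TLV type 25, which
		// readers prior to 0.0.113 simply skip.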
		// Version 1 deserializers expected to read parts of the config object here. Version 2
		// deserializers (0.0.99) now read config through TLVs, and as we now require them for
		// `minimum_depth` we simply write dummy values here.
		writer.write_all(&[0; 8])?;

		self.context.channel_id.write(writer)?;
		let mut channel_state = self.context.channel_state;
		if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
			channel_state.set_peer_disconnected();
		} else {
			debug_assert!(false, "Pre-funded/shutdown channels should not be written");
		}
		channel_state.to_u32().write(writer)?;
		self.context.channel_value_satoshis.write(writer)?;

		self.context.latest_monitor_update_id.write(writer)?;

		// Write out the old serialization for shutdown_pubkey for backwards compatibility, if
		// deserialized from that format.
		match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
			Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
			None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
		}
		self.context.destination_script.write(writer)?;

		self.context.cur_holder_commitment_transaction_number.write(writer)?;
		self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
		self.context.value_to_self_msat.write(writer)?;
		let mut dropped_inbound_htlcs = 0;
		for htlc in self.context.pending_inbound_htlcs.iter() {
			if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
				dropped_inbound_htlcs += 1;
			}
		}
		(self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
		for htlc in self.context.pending_inbound_htlcs.iter() {
			if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
				continue; // Drop
			}
			htlc.htlc_id.write(writer)?;
			htlc.amount_msat.write(writer)?;
			htlc.cltv_expiry.write(writer)?;
			htlc.payment_hash.write(writer)?;
			match &htlc.state {
				&InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
				&InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
					1u8.write(writer)?;
					htlc_state.write(writer)?;
				},
				&InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
					2u8.write(writer)?;
					htlc_state.write(writer)?;
				},
				&InboundHTLCState::Committed => {
					3u8.write(writer)?;
				},
				&InboundHTLCState::LocalRemoved(ref removal_reason) => {
					4u8.write(writer)?;
					removal_reason.write(writer)?;
				},
			}
		}
		let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
		let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
		let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();

		(self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
		for htlc in self.context.pending_outbound_htlcs.iter() {
			htlc.htlc_id.write(writer)?;
			htlc.amount_msat.write(writer)?;
			htlc.cltv_expiry.write(writer)?;
			htlc.payment_hash.write(writer)?;
			htlc.source.write(writer)?;
			match &htlc.state {
				&OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
					0u8.write(writer)?;
					onion_packet.write(writer)?;
				},
				&OutboundHTLCState::Committed => {
					1u8.write(writer)?;
				},
				&OutboundHTLCState::RemoteRemoved(_) => {
					// Treat this as a Committed because we haven't received the CS - they'll
					// resend the claim/fail on reconnect, as will (hopefully) the missing CS.
					1u8.write(writer)?;
				},
				&OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
					3u8.write(writer)?;
					if let OutboundHTLCOutcome::Success(preimage) = outcome {
						preimages.push(preimage);
					}
					let reason: Option<&HTLCFailReason> = outcome.into();
					reason.write(writer)?;
				},
				&OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
					4u8.write(writer)?;
					if let OutboundHTLCOutcome::Success(preimage) = outcome {
						preimages.push(preimage);
					}
					let reason: Option<&HTLCFailReason> = outcome.into();
					reason.write(writer)?;
				},
			}
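			// Note: the success preimages collected in the match above aren't written inline;
			// they're serialized once near the end of `write`, as TLV type 15 (`preimages`).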
			pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat);
			pending_outbound_blinding_points.push(htlc.blinding_point);
		}

		let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
		let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
		// Vec of (htlc_id, failure_code, sha256_of_onion)
		let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new();
		(self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
		for update in self.context.holding_cell_htlc_updates.iter() {
			match update {
				&HTLCUpdateAwaitingACK::AddHTLC {
					ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
					blinding_point, skimmed_fee_msat,
				} => {
					0u8.write(writer)?;
					amount_msat.write(writer)?;
					cltv_expiry.write(writer)?;
					payment_hash.write(writer)?;
					source.write(writer)?;
					onion_routing_packet.write(writer)?;

					holding_cell_skimmed_fees.push(skimmed_fee_msat);
					holding_cell_blinding_points.push(blinding_point);
				},
				&HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
					1u8.write(writer)?;
					payment_preimage.write(writer)?;
					htlc_id.write(writer)?;
				},
				&HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
					2u8.write(writer)?;
					htlc_id.write(writer)?;
					err_packet.write(writer)?;
				},
				&HTLCUpdateAwaitingACK::FailMalformedHTLC {
					htlc_id, failure_code, sha256_of_onion
				} => {
					// We don't want to break downgrading by adding a new variant, so write a dummy
					// `::FailHTLC` variant and write the real malformed error as an optional TLV.
					malformed_htlcs.push((htlc_id, failure_code, sha256_of_onion));

					let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
					2u8.write(writer)?;
					htlc_id.write(writer)?;
					dummy_err_packet.write(writer)?;
				},
			}
		}
		match self.context.resend_order {
			RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
			RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
		}

		self.context.monitor_pending_channel_ready.write(writer)?;
		self.context.monitor_pending_revoke_and_ack.write(writer)?;
		self.context.monitor_pending_commitment_signed.write(writer)?;

		(self.context.monitor_pending_forwards.len() as u64).write(writer)?;
		for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
			pending_forward.write(writer)?;
			htlc_id.write(writer)?;
		}

		(self.context.monitor_pending_failures.len() as u64).write(writer)?;
		for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
			htlc_source.write(writer)?;
			payment_hash.write(writer)?;
			fail_reason.write(writer)?;
		}
		if self.context.is_outbound() {
			self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
		} else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
			Some(feerate).write(writer)?;
		} else {
			// As for inbound HTLCs, if the update was only announced and never committed in a
			// commitment_signed, drop it.
			None::<u32>.write(writer)?;
		}

		self.context.holding_cell_update_fee.write(writer)?;
		self.context.next_holder_htlc_id.write(writer)?;
		(self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
		self.context.update_time_counter.write(writer)?;
		self.context.feerate_per_kw.write(writer)?;

		// Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
		// however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
		// `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
		// consider the stale state on reload.
		0u8.write(writer)?;
		self.context.funding_tx_confirmed_in.write(writer)?;
		self.context.funding_tx_confirmation_height.write(writer)?;
		self.context.short_channel_id.write(writer)?;

		self.context.counterparty_dust_limit_satoshis.write(writer)?;
		self.context.holder_dust_limit_satoshis.write(writer)?;
		self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;

		// Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
		self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;

		self.context.counterparty_htlc_minimum_msat.write(writer)?;
		self.context.holder_htlc_minimum_msat.write(writer)?;
		self.context.counterparty_max_accepted_htlcs.write(writer)?;

		// Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
		self.context.minimum_depth.unwrap_or(0).write(writer)?;

		match &self.context.counterparty_forwarding_info {
			Some(info) => {
				1u8.write(writer)?;
				info.fee_base_msat.write(writer)?;
				info.fee_proportional_millionths.write(writer)?;
				info.cltv_expiry_delta.write(writer)?;
			},
			None => 0u8.write(writer)?
		}
		self.context.channel_transaction_parameters.write(writer)?;
		self.context.funding_transaction.write(writer)?;

		self.context.counterparty_cur_commitment_point.write(writer)?;
		self.context.counterparty_prev_commitment_point.write(writer)?;
		self.context.counterparty_node_id.write(writer)?;

		self.context.counterparty_shutdown_scriptpubkey.write(writer)?;

		self.context.commitment_secrets.write(writer)?;

		self.context.channel_update_status.write(writer)?;

		#[cfg(any(test, fuzzing))]
		(self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
		#[cfg(any(test, fuzzing))]
		for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
			htlc.write(writer)?;
		}
		// If the channel type is something other than only-static-remote-key, then we need to have
		// older clients fail to deserialize this channel at all. If the type is
		// only-static-remote-key, we simply consider it "default" and don't write the channel type
		// out at all.
		let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
			Some(&self.context.channel_type) } else { None };

		// The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
		// the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
		// a different percentage of the channel value than 10%, which older versions of LDK used
		// to set it to before the percentage was made configurable.
		let serialized_holder_selected_reserve =
			if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
			{ Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };

		let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
		old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
		let serialized_holder_htlc_max_in_flight =
			if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
			{ Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
		let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
		let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);

		// `user_id` used to be a single u64 value. In order to remain backwards compatible with
		// versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
		// we write the high bytes as an option here.
		let user_id_high_opt = Some((self.context.user_id >> 64) as u64);

		let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
		write_tlv_fields!(writer, {
			(0, self.context.announcement_sigs, option),
			// minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
			// default value instead of being Option<>al. Thus, to maintain compatibility we write
			// them twice, once with their original default values above, and once as an option
			// here. On the read side, old versions will simply ignore the odd-type entries here,
			// and new versions map the default values to None and allow the TLV entries here to
			// override them.
			(1, self.context.minimum_depth, option),
			(2, chan_type, option),
			(3, self.context.counterparty_selected_channel_reserve_satoshis, option),
			(4, serialized_holder_selected_reserve, option),
			(5, self.context.config, required),
			(6, serialized_holder_htlc_max_in_flight, option),
			(7, self.context.shutdown_scriptpubkey, option),
			(8, self.context.blocked_monitor_updates, optional_vec),
			(9, self.context.target_closing_feerate_sats_per_kw, option),
			(11, self.context.monitor_pending_finalized_fulfills, required_vec),
			(13, self.context.channel_creation_height, required),
			(15, preimages, required_vec),
			(17, self.context.announcement_sigs_state, required),
			(19, self.context.latest_inbound_scid_alias, option),
			(21, self.context.outbound_scid_alias, required),
			(23, channel_ready_event_emitted, option),
			(25, user_id_high_opt, option),
			(27, self.context.channel_keys_id, required),
			(28, holder_max_accepted_htlcs, option),
			(29, self.context.temporary_channel_id, option),
			(31, channel_pending_event_emitted, option),
			(35, pending_outbound_skimmed_fees, optional_vec),
			(37, holding_cell_skimmed_fees, optional_vec),
			(38, self.context.is_batch_funding, option),
			(39, pending_outbound_blinding_points, optional_vec),
			(41, holding_cell_blinding_points, optional_vec),
			(43, malformed_htlcs, optional_vec), // Added in 0.0.119
		});

		Ok(())
	}
}
const MAX_ALLOC_SIZE: usize = 64*1024;
impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
	where
		ES::Target: EntropySource,
		SP::Target: SignerProvider
{
	fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
		let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
		let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
		// `user_id` used to be a single u64 value. In order to remain backwards compatible with
		// versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
		// the low bytes now and the high bytes later.
		let user_id_low: u64 = Readable::read(reader)?;

		let mut config = Some(LegacyChannelConfig::default());
		if ver == 1 {
			// Read the old serialization of the ChannelConfig from version 0.0.98.
			config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
			config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
			config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
			config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
		} else {
			// Read the 8 bytes of backwards-compatibility ChannelConfig data.
			let mut _val: u64 = Readable::read(reader)?;
		}
		let channel_id = Readable::read(reader)?;
		let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?;
		let channel_value_satoshis = Readable::read(reader)?;

		let latest_monitor_update_id = Readable::read(reader)?;

		let mut keys_data = None;
		if ver <= 2 {
			// Read the serialized signer bytes. We'll choose to deserialize them or not based on whether
			// the `channel_keys_id` TLV is present below.
			let keys_len: u32 = Readable::read(reader)?;
			keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
			while keys_data.as_ref().unwrap().len() != keys_len as usize {
				// Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
				let mut data = [0; 1024];
				let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
				reader.read_exact(read_slice)?;
				keys_data.as_mut().unwrap().extend_from_slice(read_slice);
			}
		}
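		// Illustrative note on the bound above: even with a corrupted `keys_len` of u32::MAX,
		// at most MAX_ALLOC_SIZE (64 KiB) is reserved up front, and `read_exact` fails with an
		// EOF error long before gigabytes could be buffered.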
		// Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
		let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
			Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
			Err(_) => None,
		};
		let destination_script = Readable::read(reader)?;

		let cur_holder_commitment_transaction_number = Readable::read(reader)?;
		let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
		let value_to_self_msat = Readable::read(reader)?;
		let pending_inbound_htlc_count: u64 = Readable::read(reader)?;

		let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
		for _ in 0..pending_inbound_htlc_count {
			pending_inbound_htlcs.push(InboundHTLCOutput {
				htlc_id: Readable::read(reader)?,
				amount_msat: Readable::read(reader)?,
				cltv_expiry: Readable::read(reader)?,
				payment_hash: Readable::read(reader)?,
				state: match <u8 as Readable>::read(reader)? {
					1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
					2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
					3 => InboundHTLCState::Committed,
					4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
					_ => return Err(DecodeError::InvalidValue),
				},
			});
		}
		let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
		let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
		for _ in 0..pending_outbound_htlc_count {
			pending_outbound_htlcs.push(OutboundHTLCOutput {
				htlc_id: Readable::read(reader)?,
				amount_msat: Readable::read(reader)?,
				cltv_expiry: Readable::read(reader)?,
				payment_hash: Readable::read(reader)?,
				source: Readable::read(reader)?,
				state: match <u8 as Readable>::read(reader)? {
					0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
					1 => OutboundHTLCState::Committed,
					2 => {
						let option: Option<HTLCFailReason> = Readable::read(reader)?;
						OutboundHTLCState::RemoteRemoved(option.into())
					},
					3 => {
						let option: Option<HTLCFailReason> = Readable::read(reader)?;
						OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
					},
					4 => {
						let option: Option<HTLCFailReason> = Readable::read(reader)?;
						OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
					},
					_ => return Err(DecodeError::InvalidValue),
				},
				skimmed_fee_msat: None,
				blinding_point: None,
			});
		}
		let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
		let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
		for _ in 0..holding_cell_htlc_update_count {
			holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
				0 => HTLCUpdateAwaitingACK::AddHTLC {
					amount_msat: Readable::read(reader)?,
					cltv_expiry: Readable::read(reader)?,
					payment_hash: Readable::read(reader)?,
					source: Readable::read(reader)?,
					onion_routing_packet: Readable::read(reader)?,
					skimmed_fee_msat: None,
					blinding_point: None,
				},
				1 => HTLCUpdateAwaitingACK::ClaimHTLC {
					payment_preimage: Readable::read(reader)?,
					htlc_id: Readable::read(reader)?,
				},
				2 => HTLCUpdateAwaitingACK::FailHTLC {
					htlc_id: Readable::read(reader)?,
					err_packet: Readable::read(reader)?,
				},
				_ => return Err(DecodeError::InvalidValue),
			});
		}
		let resend_order = match <u8 as Readable>::read(reader)? {
			0 => RAACommitmentOrder::CommitmentFirst,
			1 => RAACommitmentOrder::RevokeAndACKFirst,
			_ => return Err(DecodeError::InvalidValue),
		};

		let monitor_pending_channel_ready = Readable::read(reader)?;
		let monitor_pending_revoke_and_ack = Readable::read(reader)?;
		let monitor_pending_commitment_signed = Readable::read(reader)?;

		let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
		let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
		for _ in 0..monitor_pending_forwards_count {
			monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
		}

		let monitor_pending_failures_count: u64 = Readable::read(reader)?;
		let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
		for _ in 0..monitor_pending_failures_count {
			monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
		}

		let pending_update_fee_value: Option<u32> = Readable::read(reader)?;

		let holding_cell_update_fee = Readable::read(reader)?;

		let next_holder_htlc_id = Readable::read(reader)?;
		let next_counterparty_htlc_id = Readable::read(reader)?;
		let update_time_counter = Readable::read(reader)?;
		let feerate_per_kw = Readable::read(reader)?;
		// Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
		// however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
		// `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
		// consider the stale state on reload.
		match <u8 as Readable>::read(reader)? {
			0 => {},
			1 => {
				let _: u32 = Readable::read(reader)?;
				let _: u64 = Readable::read(reader)?;
				let _: Signature = Readable::read(reader)?;
			},
			_ => return Err(DecodeError::InvalidValue),
		}
		let funding_tx_confirmed_in = Readable::read(reader)?;
		let funding_tx_confirmation_height = Readable::read(reader)?;
		let short_channel_id = Readable::read(reader)?;

		let counterparty_dust_limit_satoshis = Readable::read(reader)?;
		let holder_dust_limit_satoshis = Readable::read(reader)?;
		let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
		let mut counterparty_selected_channel_reserve_satoshis = None;
		if ver == 1 {
			// Read the old serialization from version 0.0.98.
			counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
		} else {
			// Read the 8 bytes of backwards-compatibility data.
			let _dummy: u64 = Readable::read(reader)?;
		}
		let counterparty_htlc_minimum_msat = Readable::read(reader)?;
		let holder_htlc_minimum_msat = Readable::read(reader)?;
		let counterparty_max_accepted_htlcs = Readable::read(reader)?;

		let mut minimum_depth = None;
		if ver == 1 {
			// Read the old serialization from version 0.0.98.
			minimum_depth = Some(Readable::read(reader)?);
		} else {
			// Read the 4 bytes of backwards-compatibility data.
			let _dummy: u32 = Readable::read(reader)?;
		}
		let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
			0 => None,
			1 => Some(CounterpartyForwardingInfo {
				fee_base_msat: Readable::read(reader)?,
				fee_proportional_millionths: Readable::read(reader)?,
				cltv_expiry_delta: Readable::read(reader)?,
			}),
			_ => return Err(DecodeError::InvalidValue),
		};

		let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
		let funding_transaction: Option<Transaction> = Readable::read(reader)?;

		let counterparty_cur_commitment_point = Readable::read(reader)?;

		let counterparty_prev_commitment_point = Readable::read(reader)?;
		let counterparty_node_id = Readable::read(reader)?;

		let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
		let commitment_secrets = Readable::read(reader)?;
		let channel_update_status = Readable::read(reader)?;

		#[cfg(any(test, fuzzing))]
		let mut historical_inbound_htlc_fulfills = HashSet::new();
		#[cfg(any(test, fuzzing))]
		{
			let htlc_fulfills_len: u64 = Readable::read(reader)?;
			for _ in 0..htlc_fulfills_len {
				assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
			}
		}
		let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
			Some((feerate, if channel_parameters.is_outbound_from_holder {
				FeeUpdateState::Outbound
			} else {
				FeeUpdateState::AwaitingRemoteRevokeToAnnounce
			}))
		} else {
			None
		};
		let mut announcement_sigs = None;
		let mut target_closing_feerate_sats_per_kw = None;
		let mut monitor_pending_finalized_fulfills = Some(Vec::new());
		let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
		let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
		// Prior to supporting channel type negotiation, all of our channels were static_remotekey
		// only, so we default to that if none was written.
		let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
		let mut channel_creation_height = Some(serialized_height);
		let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;

		// If we read an old Channel, for simplicity we just treat it as "we never sent an
		// AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
		let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
		let mut latest_inbound_scid_alias = None;
		let mut outbound_scid_alias = None;
		let mut channel_pending_event_emitted = None;
		let mut channel_ready_event_emitted = None;

		let mut user_id_high_opt: Option<u64> = None;
		let mut channel_keys_id: Option<[u8; 32]> = None;
		let mut temporary_channel_id: Option<ChannelId> = None;
		let mut holder_max_accepted_htlcs: Option<u16> = None;

		let mut blocked_monitor_updates = Some(Vec::new());

		let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
		let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;

		let mut is_batch_funding: Option<()> = None;

		let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
		let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;

		let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
		read_tlv_fields!(reader, {
			(0, announcement_sigs, option),
			(1, minimum_depth, option),
			(2, channel_type, option),
			(3, counterparty_selected_channel_reserve_satoshis, option),
			(4, holder_selected_channel_reserve_satoshis, option),
			(5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
			(6, holder_max_htlc_value_in_flight_msat, option),
			(7, shutdown_scriptpubkey, option),
			(8, blocked_monitor_updates, optional_vec),
			(9, target_closing_feerate_sats_per_kw, option),
			(11, monitor_pending_finalized_fulfills, optional_vec),
			(13, channel_creation_height, option),
			(15, preimages_opt, optional_vec),
			(17, announcement_sigs_state, option),
			(19, latest_inbound_scid_alias, option),
			(21, outbound_scid_alias, option),
			(23, channel_ready_event_emitted, option),
			(25, user_id_high_opt, option),
			(27, channel_keys_id, option),
			(28, holder_max_accepted_htlcs, option),
			(29, temporary_channel_id, option),
			(31, channel_pending_event_emitted, option),
			(35, pending_outbound_skimmed_fees_opt, optional_vec),
			(37, holding_cell_skimmed_fees_opt, optional_vec),
			(38, is_batch_funding, option),
			(39, pending_outbound_blinding_points_opt, optional_vec),
			(41, holding_cell_blinding_points_opt, optional_vec),
			(43, malformed_htlcs, optional_vec), // Added in 0.0.119
		});
		let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
			let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
			// If we've gotten to the funding stage of the channel, populate the signer with its
			// required channel parameters.
			if channel_state >= ChannelState::FundingNegotiated {
				holder_signer.provide_channel_parameters(&channel_parameters);
			}
			(channel_keys_id, holder_signer)
		} else {
			// `keys_data` can be `None` if we had corrupted data.
			let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
			let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
			(holder_signer.channel_keys_id(), holder_signer)
		};
		if let Some(preimages) = preimages_opt {
			let mut iter = preimages.into_iter();
			for htlc in pending_outbound_htlcs.iter_mut() {
				match htlc.state {
					OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
						htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
					},
					OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
						htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
					},
					_ => {}
				}
			}
			// We expect all preimages to be consumed above
			if iter.next().is_some() {
				return Err(DecodeError::InvalidValue);
			}
		}
		let chan_features = channel_type.as_ref().unwrap();
		if !chan_features.is_subset(our_supported_features) {
			// If the channel was written by a new version and negotiated with features we don't
			// understand yet, refuse to read it.
			return Err(DecodeError::UnknownRequiredFeature);
		}

		// ChannelTransactionParameters may have had an empty features set upon deserialization.
		// To account for that, we're proactively setting/overriding the field here.
		channel_parameters.channel_type_features = chan_features.clone();

		let mut secp_ctx = Secp256k1::new();
		secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
		// `user_id` used to be a single u64 value. In order to remain backwards
		// compatible with versions prior to 0.0.113, the u128 is serialized as two
		// separate u64 values.
		let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
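		// For channels written before 0.0.113 the high-half TLV is absent, so `unwrap_or(0)`
		// reproduces the original (genuinely 64-bit) `user_id` unchanged.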
		let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);

		if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
			let mut iter = skimmed_fees.into_iter();
			for htlc in pending_outbound_htlcs.iter_mut() {
				htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
			}
			// We expect all skimmed fees to be consumed above
			if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
		}
		if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
			let mut iter = skimmed_fees.into_iter();
			for htlc in holding_cell_htlc_updates.iter_mut() {
				if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
					*skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
				}
			}
			// We expect all skimmed fees to be consumed above
			if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
		}
		if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
			let mut iter = blinding_pts.into_iter();
			for htlc in pending_outbound_htlcs.iter_mut() {
				htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
			}
			// We expect all blinding points to be consumed above
			if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
		}
		if let Some(blinding_pts) = holding_cell_blinding_points_opt {
			let mut iter = blinding_pts.into_iter();
			for htlc in holding_cell_htlc_updates.iter_mut() {
				if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
					*blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
				}
			}
			// We expect all blinding points to be consumed above
			if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
		}
		if let Some(malformed_htlcs) = malformed_htlcs {
			for (malformed_htlc_id, failure_code, sha256_of_onion) in malformed_htlcs {
				let htlc_idx = holding_cell_htlc_updates.iter().position(|htlc| {
					if let HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet } = htlc {
						let matches = *htlc_id == malformed_htlc_id;
						if matches { debug_assert!(err_packet.data.is_empty()) }
						matches
					} else { false }
				}).ok_or(DecodeError::InvalidValue)?;
				let malformed_htlc = HTLCUpdateAwaitingACK::FailMalformedHTLC {
					htlc_id: malformed_htlc_id, failure_code, sha256_of_onion
				};
				let _ = core::mem::replace(&mut holding_cell_htlc_updates[htlc_idx], malformed_htlc);
			}
		}
		Ok(Channel {
			context: ChannelContext {
				user_id,

				config: config.unwrap(),

				prev_config: None,

				// Note that we don't care about serializing handshake limits as we only ever serialize
				// channel data after the handshake has completed.
				inbound_handshake_limits_override: None,

				channel_id,
				temporary_channel_id,
				channel_state,
				announcement_sigs_state: announcement_sigs_state.unwrap(),
				secp_ctx,
				channel_value_satoshis,

				latest_monitor_update_id,

				holder_signer: ChannelSignerType::Ecdsa(holder_signer),
				shutdown_scriptpubkey,
				destination_script,

				cur_holder_commitment_transaction_number,
				cur_counterparty_commitment_transaction_number,
				value_to_self_msat,

				holder_max_accepted_htlcs,
				pending_inbound_htlcs,
				pending_outbound_htlcs,
				holding_cell_htlc_updates,

				resend_order,

				monitor_pending_channel_ready,
				monitor_pending_revoke_and_ack,
				monitor_pending_commitment_signed,
				monitor_pending_forwards,
				monitor_pending_failures,
				monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),

				signer_pending_commitment_update: false,
				signer_pending_funding: false,

				pending_update_fee,
				holding_cell_update_fee,
				next_holder_htlc_id,
				next_counterparty_htlc_id,
				update_time_counter,

				feerate_per_kw,

				#[cfg(debug_assertions)]
				holder_max_commitment_tx_output: Mutex::new((0, 0)),
				#[cfg(debug_assertions)]
				counterparty_max_commitment_tx_output: Mutex::new((0, 0)),

				last_sent_closing_fee: None,
				pending_counterparty_closing_signed: None,
				expecting_peer_commitment_signed: false,
				closing_fee_limits: None,
				target_closing_feerate_sats_per_kw,

				funding_tx_confirmed_in,
				funding_tx_confirmation_height,
				short_channel_id,
				channel_creation_height: channel_creation_height.unwrap(),

				counterparty_dust_limit_satoshis,
				holder_dust_limit_satoshis,
				counterparty_max_htlc_value_in_flight_msat,
				holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
				counterparty_selected_channel_reserve_satoshis,
				holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
				counterparty_htlc_minimum_msat,
				holder_htlc_minimum_msat,
				counterparty_max_accepted_htlcs,

				minimum_depth,

				counterparty_forwarding_info,

				channel_transaction_parameters: channel_parameters,
				funding_transaction,

				is_batch_funding,

				counterparty_cur_commitment_point,
				counterparty_prev_commitment_point,
				counterparty_node_id,

				counterparty_shutdown_scriptpubkey,

				commitment_secrets,

				channel_update_status,
				closing_signed_in_flight: false,

				announcement_sigs,

				#[cfg(any(test, fuzzing))]
				next_local_commitment_tx_fee_info_cached: Mutex::new(None),
				#[cfg(any(test, fuzzing))]
				next_remote_commitment_tx_fee_info_cached: Mutex::new(None),

				workaround_lnd_bug_4006: None,
				sent_message_awaiting_response: None,

				latest_inbound_scid_alias,
				// Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if it's missing
				outbound_scid_alias: outbound_scid_alias.unwrap_or(0),

				channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
				channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),

				#[cfg(any(test, fuzzing))]
				historical_inbound_htlc_fulfills,

				channel_type: channel_type.unwrap(),

				blocked_monitor_updates: blocked_monitor_updates.unwrap(),
			}
		})
	}
}
#[cfg(test)]
mod tests {
	use bitcoin::blockdata::constants::ChainHash;
	use bitcoin::blockdata::script::{ScriptBuf, Builder};
	use bitcoin::blockdata::transaction::{Transaction, TxOut};
	use bitcoin::blockdata::opcodes;
	use bitcoin::network::constants::Network;
	use crate::ln::onion_utils::INVALID_ONION_BLINDING;
	use crate::ln::{PaymentHash, PaymentPreimage};
	use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
	use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
	use crate::ln::channel::InitFeatures;
	use crate::ln::channel::{AwaitingChannelReadyFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
	use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
	use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
	use crate::ln::msgs;
	use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
	use crate::ln::script::ShutdownScript;
	use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
	use crate::chain::BestBlock;
	use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
	use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
	use crate::chain::transaction::OutPoint;
	use crate::routing::router::{Path, RouteHop};
	use crate::util::config::UserConfig;
	use crate::util::errors::APIError;
	use crate::util::ser::{ReadableArgs, Writeable};
	use crate::util::test_utils;
	use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
	use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
	use bitcoin::secp256k1::ffi::Signature as FFISignature;
	use bitcoin::secp256k1::{SecretKey,PublicKey};
	use bitcoin::hashes::sha256::Hash as Sha256;
	use bitcoin::hashes::Hash;
	use bitcoin::hashes::hex::FromHex;
	use bitcoin::hash_types::WPubkeyHash;
	use bitcoin::blockdata::locktime::absolute::LockTime;
	use bitcoin::address::{WitnessProgram, WitnessVersion};
	use crate::prelude::*;
	#[test]
	fn test_channel_state_order() {
		use crate::ln::channel::NegotiatingFundingFlags;
		use crate::ln::channel::AwaitingChannelReadyFlags;
		use crate::ln::channel::ChannelReadyFlags;

		assert!(ChannelState::NegotiatingFunding(NegotiatingFundingFlags::new()) < ChannelState::FundingNegotiated);
		assert!(ChannelState::FundingNegotiated < ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()));
		assert!(ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()) < ChannelState::ChannelReady(ChannelReadyFlags::new()));
		assert!(ChannelState::ChannelReady(ChannelReadyFlags::new()) < ChannelState::ShutdownComplete);
	}
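	// A minimal round-trip sketch of the two-u64 `user_id` encoding used by the channel
	// (de)serialization code above (standalone arithmetic; the constant is an arbitrary
	// example value):
	#[test]
	fn test_user_id_u128_split_roundtrip() {
		let user_id: u128 = 0x0123_4567_89ab_cdef_1122_3344_5566_7788;
		let low = user_id as u64;
		let high = (user_id >> 64) as u64;
		assert_eq!(((high as u128) << 64) + low as u128, user_id);
	}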
	struct TestFeeEstimator {
		fee_est: u32
	}
	impl FeeEstimator for TestFeeEstimator {
		fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
			self.fee_est
		}
	}
	#[test]
	fn test_max_funding_satoshis_no_wumbo() {
		assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
		assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
			"MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
	}
	struct Keys {
		signer: InMemorySigner,
	}

	impl EntropySource for Keys {
		fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
	}

	impl SignerProvider for Keys {
		type EcdsaSigner = InMemorySigner;
		#[cfg(taproot)]
		type TaprootSigner = InMemorySigner;

		fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
			self.signer.channel_keys_id()
		}

		fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
			self.signer.clone()
		}

		fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
		fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
			let secp_ctx = Secp256k1::signing_only();
			let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
			let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
			Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
		}

		fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
			let secp_ctx = Secp256k1::signing_only();
			let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
			Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
		}
	}
#[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
	PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
}

#[test]
fn upfront_shutdown_script_incompatibility() {
	let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
	let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
		&WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
	).unwrap();

	let seed = [42; 32];
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
	keys_provider.expect(OnGetShutdownScriptpubkey {
		returns: non_v0_segwit_shutdown_script.clone(),
	});

	let secp_ctx = Secp256k1::new();
	let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let config = UserConfig::default();
	match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
		Err(APIError::IncompatibleShutdownScript { script }) => {
			assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
		},
		Err(e) => panic!("Unexpected error: {:?}", e),
		Ok(_) => panic!("Expected error"),
	}
}

// Check that, during channel creation, we use the same feerate in the open_channel message
// as we do in the Channel object creation itself.
#[test]
fn test_open_channel_msg_fee() {
	let original_fee = 253;
	let mut fee_est = TestFeeEstimator { fee_est: original_fee };
	let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

	let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let config = UserConfig::default();
	let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

	// Now change the fee so we can check that the fee in the open_channel message is the
	// same as the old fee.
	fee_est.fee_est = 500;
	let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
	assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
}

#[test]
fn test_holder_vs_counterparty_dust_limit() {
	// Test that when calculating the local and remote commitment transaction fees, the correct
	// dust limits are used.
	let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15000 });
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
	let logger = test_utils::TestLogger::new();
	let best_block = BestBlock::from_network(network);

	// Go through the flow of opening a channel between two nodes, making sure
	// they have different dust limits.

	// Create Node A's channel pointing to Node B's pubkey
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let config = UserConfig::default();
	let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

	// Create Node B's channel by receiving Node A's open_channel message
	// Make sure A's dust limit is as we expect.
	let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
	let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

	// Node B --> Node A: accept channel, explicitly setting B's dust limit.
	let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
	accept_channel_msg.dust_limit_satoshis = 546;
	node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
	node_a_chan.context.holder_dust_limit_satoshis = 1560;

	// Node A --> Node B: funding created
	let output_script = node_a_chan.context.get_funding_redeemscript();
	let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
		value: 10000000, script_pubkey: output_script.clone(),
	}]};
	let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
	let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
	let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

	// Node B --> Node A: funding signed
	let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
	let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };

	// Put some inbound and outbound HTLCs in A's channel.
	let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
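	// (Rough arithmetic, assuming the pre-anchors BOLT 3 HTLC tx weights of 663/703 WU: at
	// 15000 sat/kW the HTLC-timeout tx costs ~9_945 sats and the HTLC-success tx ~10_545
	// sats, so A's effective dust thresholds are ~11_505/12_105 sats while B's are
	// ~10_491/11_091 sats; 11_092 sats falls below A's thresholds and above B's.)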
	node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
		htlc_id: 0,
		amount_msat: htlc_amount_msat,
		payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
		cltv_expiry: 300000000,
		state: InboundHTLCState::Committed,
	});

	node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
		htlc_id: 1,
		amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
		payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
		cltv_expiry: 200000000,
		state: OutboundHTLCState::Committed,
		source: HTLCSource::OutboundRoute {
			path: Path { hops: Vec::new(), blinded_tail: None },
			session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
			first_hop_htlc_msat: 548,
			payment_id: PaymentId([42; 32]),
		},
		skimmed_fee_msat: None,
		blinding_point: None,
	});

	// Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
	// the dust limit check.
	let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
	let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
	let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
	assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);

	// Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
	// of the HTLCs are seen to be above the dust limit.
	node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
	let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
	let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
	let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
	assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
}

#[test]
fn test_timeout_vs_success_htlc_dust_limit() {
	// Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
	// calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
	// *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
	// `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
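	// In other words, the effective dust threshold for a given HTLC is roughly
	//   dust_limit_satoshis + feerate_per_kw * htlc_tx_weight / 1000
	// where the weight is that of the HTLC-success tx for HTLCs we would claim with a
	// preimage, and that of the HTLC-timeout tx for HTLCs we would claim after expiry.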
	let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 });
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

	let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let config = UserConfig::default();
	let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

	let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
	let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());

	// If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
	// counted as dust when it shouldn't be.
	let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
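	// (This is one sat above the timeout-based threshold; if the heavier success-tx weight
	// were used by mistake, the computed threshold would be higher and this amount would be
	// misclassified as dust.)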
	let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
	let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
	assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);

	// If swapped: this HTLC would be counted as non-dust when it shouldn't be.
	let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
	let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
	let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
	assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);

	chan.context.channel_transaction_parameters.is_outbound_from_holder = false;

	// If swapped: this HTLC would be counted as non-dust when it shouldn't be.
	let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
	let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
	let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
	assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);

	// If swapped: this HTLC would be counted as dust when it shouldn't be.
	let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
	let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
	let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
	assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
}

#[test]
fn channel_reestablish_no_updates() {
	let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15000 });
	let logger = test_utils::TestLogger::new();
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let best_block = BestBlock::from_network(network);
	let chain_hash = ChainHash::using_genesis_block(network);
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

	// Go through the flow of opening a channel between two nodes.

	// Create Node A's channel pointing to Node B's pubkey
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let config = UserConfig::default();
	let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

	// Create Node B's channel by receiving Node A's open_channel message
	let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
	let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

	// Node B --> Node A: accept channel
	let accept_channel_msg = node_b_chan.accept_inbound_channel();
	node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();

	// Node A --> Node B: funding created
	let output_script = node_a_chan.context.get_funding_redeemscript();
	let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
		value: 10000000, script_pubkey: output_script.clone(),
	}]};
	let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
	let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
	let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

	// Node B --> Node A: funding signed
	let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
	let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };

	// Now disconnect the two nodes and check that the commitment point in
	// Node B's channel_reestablish message is sane.
	assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
	let msg = node_b_chan.get_channel_reestablish(&&logger);
	assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
	assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
	assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
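	// (Right after funding, each side holds only the initial commitment (number 0) and has
	// revoked nothing, so the next commitment it expects is number 1, the next revocation it
	// expects covers commitment 0, and the "last secret" field is an all-zero placeholder.)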
	// Check that the commitment point in Node A's channel_reestablish message
	// is sane.
	assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
	let msg = node_a_chan.get_channel_reestablish(&&logger);
	assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
	assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
	assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
}

#[test]
fn test_configured_holder_max_htlc_value_in_flight() {
	let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15000 });
	let logger = test_utils::TestLogger::new();
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
	let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());

	let mut config_2_percent = UserConfig::default();
	config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
	let mut config_99_percent = UserConfig::default();
	config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
	let mut config_0_percent = UserConfig::default();
	config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
	let mut config_101_percent = UserConfig::default();
	config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;

	// Test that `OutboundV1Channel::new` creates a channel with the correct value for
	// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
	// here the lower bound + 1 (2%) of the `channel_value`.
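	// (For the 10_000_000-sat channel below, that is 10^10 msat * 2% = 200_000_000 msat.)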
	let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
	let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
	assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);

	// Test with the upper bound - 1 of valid values (99%).
	let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
	let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
	assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);

	let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));

	// Test that `InboundV1Channel::new` creates a channel with the correct value for
	// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
	// here the lower bound + 1 (2%) of the `channel_value`.
	let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
	let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
	assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);

	// Test with the upper bound - 1 of valid values (99%).
	let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
	let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
	assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);

	// Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
	// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
	let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
	let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
	assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);

	// Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
	// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value larger
	// than 100.
	let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
	let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
	assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);

	// Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
	// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
	let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
	let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
	assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);

	// Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
	// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value larger
	// than 100.
	let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
	let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
	assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
}

#[test]
fn test_configured_holder_selected_channel_reserve_satoshis() {

	// Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
	// channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
	test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);

	// Test with valid but unreasonably high channel reserves
	// Requesting and accepting parties have requested for 49%-49% and 60%-30% channel reserve
	test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
	test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);

	// Test with a calculated channel reserve less than the lower bound,
	// i.e. `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
	test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
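	// (100_000 sats * 0.00002 = 2 sats, below the floor, so the helper's `cmp::max` should
	// clamp the expected reserve up to `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.)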
	// Test with invalid channel reserves, since the sum of both is greater than or equal
	// to the channel value
	test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
	test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
}

fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
	let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
	let logger = test_utils::TestLogger::new();
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
	let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());

	let mut outbound_node_config = UserConfig::default();
	outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
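	// (The config knob is in parts-per-million, so e.g. a 0.02 fraction becomes 20_000.)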
	let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();

	let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
	assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);

	let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
	let mut inbound_node_config = UserConfig::default();
	inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;

	if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
		let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();

		let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);

		assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
		assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
	} else {
		// Channel negotiations failed
		let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
		assert!(result.is_err());
	}
}

#[test]
fn channel_update() {
	let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15000 });
	let logger = test_utils::TestLogger::new();
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let best_block = BestBlock::from_network(network);
	let chain_hash = ChainHash::using_genesis_block(network);
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

	// Create Node A's channel pointing to Node B's pubkey
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let config = UserConfig::default();
	let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

	// Create Node B's channel by receiving Node A's open_channel message
	// Make sure A's dust limit is as we expect.
	let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
	let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

	// Node B --> Node A: accept channel, explicitly setting B's dust limit.
	let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
	accept_channel_msg.dust_limit_satoshis = 546;
	node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
	node_a_chan.context.holder_dust_limit_satoshis = 1560;

	// Node A --> Node B: funding created
	let output_script = node_a_chan.context.get_funding_redeemscript();
	let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
		value: 10000000, script_pubkey: output_script.clone(),
	}]};
	let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
	let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
	let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

	// Node B --> Node A: funding signed
	let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
	let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };

	// Make sure that receiving a channel update will update the Channel as expected.
	let update = ChannelUpdate {
		contents: UnsignedChannelUpdate {
			chain_hash,
			short_channel_id: 0,
			timestamp: 0,
			flags: 0,
			cltv_expiry_delta: 100,
			htlc_minimum_msat: 5,
			htlc_maximum_msat: MAX_VALUE_MSAT,
			fee_base_msat: 110,
			fee_proportional_millionths: 11,
			excess_data: Vec::new(),
		},
		signature: Signature::from(unsafe { FFISignature::new() })
	};
	assert!(node_a_chan.channel_update(&update).unwrap());

	// The counterparty can send an update with a higher minimum HTLC, but that shouldn't
	// change our official htlc_minimum_msat.
	assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
	match node_a_chan.context.counterparty_forwarding_info() {
		Some(info) => {
			assert_eq!(info.cltv_expiry_delta, 100);
			assert_eq!(info.fee_base_msat, 110);
			assert_eq!(info.fee_proportional_millionths, 11);
		},
		None => panic!("expected counterparty forwarding info to be Some")
	}
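	// Applying the identical update a second time changes nothing, so `channel_update`
	// should now report that no update took place.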
	assert!(!node_a_chan.channel_update(&update).unwrap());
}

#[test]
fn blinding_point_skimmed_fee_malformed_ser() {
	// Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized
	// properly.
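	// (Hedged note: these fields postdate the original channel encoding and ride along as
	// optional records, so the assertions at the end of this test are really checking that
	// the full encode/decode round trip preserves them.)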
	let logger = test_utils::TestLogger::new();
	let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15000 });
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let best_block = BestBlock::from_network(network);
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let config = UserConfig::default();
	let features = channelmanager::provided_init_features(&config);
	let mut outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(
		&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None
	).unwrap();
	let inbound_chan = InboundV1Channel::<&TestKeysInterface>::new(
		&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config),
		&features, &outbound_chan.get_open_channel(ChainHash::using_genesis_block(network)), 7, &config, 0, &&logger, false
	).unwrap();
	outbound_chan.accept_channel(&inbound_chan.get_accept_channel_message(), &config.channel_handshake_limits, &features).unwrap();
	let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
		value: 10000000, script_pubkey: outbound_chan.context.get_funding_redeemscript(),
	}]};
	let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
	let funding_created = outbound_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap().unwrap();
	let mut chan = match inbound_chan.funding_created(&funding_created, best_block, &&keys_provider, &&logger) {
		Ok((chan, _, _)) => chan,
		Err((_, e)) => panic!("{}", e),
	};

	let dummy_htlc_source = HTLCSource::OutboundRoute {
		path: Path {
			hops: vec![RouteHop {
				pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
				node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
				cltv_expiry_delta: 0, maybe_announced_channel: false,
			}],
			blinded_tail: None
		},
		session_priv: test_utils::privkey(42),
		first_hop_htlc_msat: 0,
		payment_id: PaymentId([42; 32]),
	};
	let dummy_outbound_output = OutboundHTLCOutput {
		htlc_id: 0,
		amount_msat: 0,
		payment_hash: PaymentHash([43; 32]),
		cltv_expiry: 0,
		state: OutboundHTLCState::Committed,
		source: dummy_htlc_source.clone(),
		skimmed_fee_msat: None,
		blinding_point: None,
	};
	let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
	for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
		if idx % 2 == 0 {
			htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
		}
		if idx % 3 == 0 {
			htlc.skimmed_fee_msat = Some(1);
		}
	}
	chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();

	let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
		amount_msat: 0,
		cltv_expiry: 0,
		payment_hash: PaymentHash([43; 32]),
		source: dummy_htlc_source.clone(),
		onion_routing_packet: msgs::OnionPacket {
			version: 0,
			public_key: Ok(test_utils::pubkey(1)),
			hop_data: [0; 20*65],
			hmac: [0; 32],
		},
		skimmed_fee_msat: None,
		blinding_point: None,
	};
	let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
		payment_preimage: PaymentPreimage([42; 32]),
		htlc_id: 0,
	};
	let dummy_holding_cell_failed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailHTLC {
		htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }
	};
	let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC {
		htlc_id, failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32],
	};
	let mut holding_cell_htlc_updates = Vec::with_capacity(12);
	for i in 0..12 {
		if i % 5 == 0 {
			holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
		} else if i % 5 == 1 {
			holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
		} else if i % 5 == 2 {
			let mut dummy_add = dummy_holding_cell_add_htlc.clone();
			if let HTLCUpdateAwaitingACK::AddHTLC {
				ref mut blinding_point, ref mut skimmed_fee_msat, ..
			} = &mut dummy_add {
				*blinding_point = Some(test_utils::pubkey(42 + i));
				*skimmed_fee_msat = Some(42);
			} else { panic!() }
			holding_cell_htlc_updates.push(dummy_add);
		} else if i % 5 == 3 {
			holding_cell_htlc_updates.push(dummy_holding_cell_malformed_htlc(i as u64));
		} else {
			holding_cell_htlc_updates.push(dummy_holding_cell_failed_htlc(i as u64));
		}
	}
	chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();

	// Encode and decode the channel and ensure that the HTLCs within are the same.
	let encoded_chan = chan.encode();
	let mut s = crate::io::Cursor::new(&encoded_chan);
	let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
	let features = channelmanager::provided_channel_type_features(&config);
	let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
	assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
	assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
}

#[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
#[test]
fn outbound_commitment_test() {
	use bitcoin::sighash;
	use bitcoin::consensus::encode::serialize;
	use bitcoin::sighash::EcdsaSighashType;
	use bitcoin::hashes::hex::FromHex;
	use bitcoin::hash_types::Txid;
	use bitcoin::secp256k1::Message;
	use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
	use crate::ln::PaymentPreimage;
	use crate::ln::channel::{HTLCOutputInCommitment, TxCreationKeys};
	use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
	use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
	use crate::util::logger::Logger;
	use crate::sync::Arc;
	use core::str::FromStr;
	use hex::DisplayHex;

	// Test vectors from BOLT 3 Appendices C and F (anchors):
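	// Each `test_commitment!`/`test_commitment_with_anchors!` invocation below checks, in
	// order: the counterparty's commitment signature, our own signature, the fully signed
	// commitment transaction hex, and then one entry per non-dust HTLC carrying its pair of
	// signatures and the signed HTLC transaction hex.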
	let feeest = TestFeeEstimator { fee_est: 15000 };
	let logger: Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());
	let secp_ctx = Secp256k1::new();

	let mut signer = InMemorySigner::new(
		&secp_ctx,
		SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
		SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
		SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
		SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
		SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),

		// These aren't set in the test vectors:
		[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
		10_000_000,
		[0; 32],
		[0; 32],
	);

	assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
			<Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
	let keys_provider = Keys { signer: signer.clone() };

	let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let mut config = UserConfig::default();
	config.channel_handshake_config.announced_channel = false;
	let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
	chan.context.holder_dust_limit_satoshis = 546;
	chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in during accept_channel

	let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };

	let counterparty_pubkeys = ChannelPublicKeys {
		funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
		revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
		payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
		delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
		htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444")),
	};
	chan.context.channel_transaction_parameters.counterparty_parameters = Some(
		CounterpartyChannelTransactionParameters {
			pubkeys: counterparty_pubkeys.clone(),
			selected_contest_delay: 144
		});
	chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
	signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);

	assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
			<Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);

	assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
			<Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);

	assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
			<Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);

	// We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
	// derived from a commitment_seed, so instead we copy it here and call
	// build_commitment_transaction.
	let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
	let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
	let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
	let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
	let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);

	macro_rules! test_commitment {
		( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
			chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
			test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
		};
	}

	macro_rules! test_commitment_with_anchors {
		( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
			chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
			test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
		};
	}

	macro_rules! test_commitment_common {
		( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
			$( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
		} ) => { {
			let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
				let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);

				let htlcs = commitment_stats.htlcs_included.drain(..)
					.filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
					.collect();
				(commitment_stats.tx, htlcs)
			};
			let trusted_tx = commitment_tx.trust();
			let unsigned_tx = trusted_tx.built_transaction();
			let redeemscript = chan.context.get_funding_redeemscript();
			let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
			let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
			log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
			assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");

			let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
			per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
			let mut counterparty_htlc_sigs = Vec::new();
			counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
			$({
				let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
				per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
				counterparty_htlc_sigs.push(remote_signature);
			})*
			assert_eq!(htlcs.len(), per_htlc.len());

			let holder_commitment_tx = HolderCommitmentTransaction::new(
				commitment_tx.clone(),
				counterparty_signature,
				counterparty_htlc_sigs,
				&chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
				chan.context.counterparty_funding_pubkey()
			);
			let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
			assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");

			let funding_redeemscript = chan.context.get_funding_redeemscript();
			let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
			assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");

			// ((htlc, counterparty_sig), (index, holder_sig))
			let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();

			$({
				log_trace!(logger, "verifying htlc {}", $htlc_idx);
				let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();

				let ref htlc = htlcs[$htlc_idx];
				let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
					chan.context.get_counterparty_selected_contest_delay().unwrap(),
					&htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
				let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
				let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
				let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
				assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");

				let mut preimage: Option<PaymentPreimage> = None;
				if !htlc.offered {
					for i in 0..5 {
						let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
						if out == htlc.payment_hash {
							preimage = Some(PaymentPreimage([i; 32]));
						}
					}

					assert!(preimage.is_some());
				}

				let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
				let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
					channel_derivation_parameters: ChannelDerivationParameters {
						value_satoshis: chan.context.channel_value_satoshis,
						keys_id: chan.context.channel_keys_id,
						transaction_parameters: chan.context.channel_transaction_parameters.clone(),
					},
					commitment_txid: trusted_tx.txid(),
					per_commitment_number: trusted_tx.commitment_number(),
					per_commitment_point: trusted_tx.per_commitment_point(),
					feerate_per_kw: trusted_tx.feerate_per_kw(),
					htlc: htlc.clone(),
					preimage: preimage.clone(),
					counterparty_sig: *htlc_counterparty_sig,
				}, &secp_ctx).unwrap();
				let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
				assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");

				let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
				assert_eq!(signature, htlc_holder_sig, "htlc sig");
				let trusted_tx = holder_commitment_tx.trust();
				htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
				log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
				assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
			})*
			assert!(htlc_counterparty_sig_iter.next().is_none());
		} }
	}

	// anchors: simple commitment tx with no HTLCs and single anchor
	test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
	                 "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
	                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

	// simple commitment tx with no HTLCs
	chan.context.value_to_self_msat = 7000000000;

	test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
	                 "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
	                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

	// anchors: simple commitment tx with no HTLCs
	test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
	                 "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
	                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

	chan.context.pending_inbound_htlcs.push({
		let mut out = InboundHTLCOutput{
			htlc_id: 0,
			amount_msat: 1000000,
			cltv_expiry: 500,
			payment_hash: PaymentHash([0; 32]),
			state: InboundHTLCState::Committed,
		};
		out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
		out
	});
	chan.context.pending_inbound_htlcs.push({
		let mut out = InboundHTLCOutput{
			htlc_id: 1,
			amount_msat: 2000000,
			cltv_expiry: 501,
			payment_hash: PaymentHash([0; 32]),
			state: InboundHTLCState::Committed,
		};
		out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
		out
	});
	chan.context.pending_outbound_htlcs.push({
		let mut out = OutboundHTLCOutput{
			htlc_id: 2,
			amount_msat: 2000000,
			cltv_expiry: 502,
			payment_hash: PaymentHash([0; 32]),
			state: OutboundHTLCState::Committed,
			source: HTLCSource::dummy(),
			skimmed_fee_msat: None,
			blinding_point: None,
		};
		out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
		out
	});
	chan.context.pending_outbound_htlcs.push({
		let mut out = OutboundHTLCOutput{
			htlc_id: 3,
			amount_msat: 3000000,
			cltv_expiry: 503,
			payment_hash: PaymentHash([0; 32]),
			state: OutboundHTLCState::Committed,
			source: HTLCSource::dummy(),
			skimmed_fee_msat: None,
			blinding_point: None,
		};
		out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
		out
	});
	chan.context.pending_inbound_htlcs.push({
		let mut out = InboundHTLCOutput{
			htlc_id: 4,
			amount_msat: 4000000,
			cltv_expiry: 504,
			payment_hash: PaymentHash([0; 32]),
			state: InboundHTLCState::Committed,
		};
		out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
		out
	});

	// commitment tx with all five HTLCs untrimmed (minimum feerate)
	chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
	chan.context.feerate_per_kw = 0;

	test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
	                 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
	                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

	                  { 0,
	                  "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
	                  "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
	                  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },

	                  { 1,
	                  "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
	                  "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
	                  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

	                  { 2,
	                  "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
	                  "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
	                  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

	                  { 3,
	                  "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
	                  "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
	                  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

	                  { 4,
	                  "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
	                  "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
	                  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
	} );

9353 // commitment tx with seven outputs untrimmed (maximum feerate)
9354 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9355 chan.context.feerate_per_kw = 647;
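
// Why 647 sat/kw is the highest feerate that keeps all seven outputs: per the
// BOLT 3 trim rule a received HTLC is dropped once
// amount_sat < dust_limit_sat + htlc_success_fee. A sketch of that arithmetic,
// assuming the non-anchor HTLC-success weight of 703 (not part of the vectors):
debug_assert_eq!(546 + 703 * 647 / 1000, 1000); // the 1,000 sat HTLC sits exactly at the threshold and is kept
debug_assert_eq!(546 + 703 * 648 / 1000, 1001); // one more sat/kw trims it, giving the six-output case below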

test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
"30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
"30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
"020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },

{ 1,
"304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
"30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
"020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

{ 2,
"30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
"3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
"020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

{ 3,
"304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
"3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
"020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 4,
"30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
"3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
"020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// commitment tx with six outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 648;

test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
"3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
"304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
"020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

{ 1,
"304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
"3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
"020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

{ 2,
"304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
"3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
"020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 3,
"3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
"3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
"020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// anchors: commitment tx with six outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 645;
chan.context.holder_dust_limit_satoshis = 1001;
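
// With anchors_zero_htlc_fee_and_dependencies the second-stage HTLC
// transactions are zero-fee, so the trim threshold is the dust limit alone and
// the feerate no longer matters for trimming: 1001 sat is the smallest dust
// limit that trims the 1,000 sat received HTLC. (The "six outputs" name counts
// HTLC and balance outputs only; the two anchors come on top.)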

test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
"3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
"3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
"02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },

{ 1,
"3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
"3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
"02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },

{ 2,
"3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
"304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
"02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },

{ 3,
"3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
"3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
"02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
} );

// commitment tx with six outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2069;
chan.context.holder_dust_limit_satoshis = 546;
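
// 2069 sat/kw is the analogous boundary for the 2,000 sat received HTLC
// (success weight 703 again); a sketch of the same arithmetic:
debug_assert_eq!(546 + 703 * 2069 / 1000, 2000); // exactly at the threshold: kept
debug_assert_eq!(546 + 703 * 2070 / 1000, 2001); // trimmed in the five-output case below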

test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
"3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
"3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
"02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

{ 1,
"3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
"3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
"02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

{ 2,
"3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
"3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
"02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 3,
"304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
"3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
"02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// commitment tx with five outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2070;

test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
"3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
"30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
"02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

{ 1,
"3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
"30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
"02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 2,
"3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
"30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
"02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// commitment tx with five outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2194;
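
// Offered HTLCs use the non-anchor HTLC-timeout weight of 663 instead, which
// is why the 2,000 sat offered HTLC survives to a higher feerate than the
// received one did; a sketch:
debug_assert_eq!(546 + 663 * 2194 / 1000, 2000); // kept here
debug_assert_eq!(546 + 663 * 2195 / 1000, 2001); // trimmed in the four-output case below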

test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
"3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
"304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
"02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

{ 1,
"3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
"3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
"02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 2,
"3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
"30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
"02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// commitment tx with four outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2195;

test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
"3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
"3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
"020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 1,
"3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
"3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
"020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// anchors: commitment tx with four outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2185;
chan.context.holder_dust_limit_satoshis = 2001;
let cached_channel_type = chan.context.channel_type.clone();
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
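
// Zero-fee HTLC transactions again make the dust limit the whole trim
// threshold: at 2,001 sat the 1,000 sat and both 2,000 sat HTLCs are trimmed,
// leaving four non-anchor outputs.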

test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
"3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
"30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
"02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },

{ 1,
"3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
"3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
"02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
} );

// commitment tx with four outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3702;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();
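
// 3702 sat/kw puts the 3,000 sat offered HTLC exactly at its trim threshold
// (timeout weight 663):
debug_assert_eq!(546 + 663 * 3702 / 1000, 3000); // kept here
debug_assert_eq!(546 + 663 * 3703 / 1000, 3001); // trimmed in the three-output case below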

test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
"3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
"304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
"020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 1,
"3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
"304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
"020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// commitment tx with three outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3703;

test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
"304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
"3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
"0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// anchors: commitment tx with three outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3687;
chan.context.holder_dust_limit_satoshis = 3001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
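
// A 3,001 sat dust limit trims every HTLC worth 3,000 sat or less, leaving
// only the 4,000 sat HTLC beside the two balance outputs (plus the anchors).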

test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
"3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
"3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
"02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
} );

// commitment tx with three outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4914;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();
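
// 4914 sat/kw is the last feerate at which the 4,000 sat received HTLC
// survives (success weight 703):
debug_assert_eq!(546 + 703 * 4914 / 1000, 4000); // kept here
debug_assert_eq!(546 + 703 * 4915 / 1000, 4001); // trimmed in the two-output case below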
9622 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
9623 "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
9624 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9627 "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
9628 "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
9629 "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
		// commitment tx with two outputs untrimmed (minimum feerate)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 4915;
		chan.context.holder_dust_limit_satoshis = 546;

		test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
		                 "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
		                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
		// anchors: commitment tx with two outputs untrimmed (minimum dust limit)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 4894;
		chan.context.holder_dust_limit_satoshis = 4001;
		chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
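		// (The anchor-variant vectors differ from the non-anchor ones because the
		// commitment gains two 330-sat anchor outputs and the counterparty's HTLC tx
		// signatures commit to SIGHASH_SINGLE|SIGHASH_ANYONECANPAY, visible as the 0x83
		// sighash byte after the first signature in the HTLC tx hex.)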
		test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
		                 "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
		                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
		// commitment tx with two outputs untrimmed (maximum feerate)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 9651180;
		chan.context.holder_dust_limit_satoshis = 546;
		chan.context.channel_type = cached_channel_type.clone();

		test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
		                 "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
		                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
		// commitment tx with one output untrimmed (minimum feerate)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 9651181;

		test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
		                 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
		                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
		// anchors: commitment tx with one output untrimmed (minimum dust limit)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 6216010;
		chan.context.holder_dust_limit_satoshis = 4001;
		chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();

		test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
		                 "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
		                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
		// commitment tx with fee greater than funder amount
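		// (At this feerate the computed commitment fee would exceed the funder's
		// balance, so the fee saturates at what the funder can actually pay; note the
		// expected signatures are identical to the one-output minimum-feerate case above.)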
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 9651936;
		chan.context.holder_dust_limit_satoshis = 546;
		chan.context.channel_type = cached_channel_type;

		test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
		                 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
		                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
		// commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
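		// (Two offered HTLCs with identical amount, script, and preimage exercise the
		// BOLT 3 tie-break: otherwise-equal outputs are ordered by HTLC cltv_expiry,
		// which the HTLC-timeout locktimes in the vectors below, 505 then 506, confirm.)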
		chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
		chan.context.feerate_per_kw = 253;
		chan.context.pending_inbound_htlcs.clear();
		chan.context.pending_inbound_htlcs.push({
			let mut out = InboundHTLCOutput{
				htlc_id: 1,
				amount_msat: 2000000,
				cltv_expiry: 501,
				payment_hash: PaymentHash([0; 32]),
				state: InboundHTLCState::Committed,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_outbound_htlcs.clear();
		chan.context.pending_outbound_htlcs.push({
			let mut out = OutboundHTLCOutput{
				htlc_id: 6,
				amount_msat: 5000001,
				cltv_expiry: 506,
				payment_hash: PaymentHash([0; 32]),
				state: OutboundHTLCState::Committed,
				source: HTLCSource::dummy(),
				skimmed_fee_msat: None,
				blinding_point: None,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_outbound_htlcs.push({
			let mut out = OutboundHTLCOutput{
				htlc_id: 5,
				amount_msat: 5000000,
				cltv_expiry: 505,
				payment_hash: PaymentHash([0; 32]),
				state: OutboundHTLCState::Committed,
				source: HTLCSource::dummy(),
				skimmed_fee_msat: None,
				blinding_point: None,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
			out
		});
		test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
		                 "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
		                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		                  { 0,
		                  "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
		                  "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
		                  "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

		                  { 1,
		                  "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
		                  "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
		                  "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },

		                  { 2,
		                  "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
		                  "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
		                  "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
		} );
		chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
		test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
		                 "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
		                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		                  { 0,
		                  "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
		                  "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
		                  "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },

		                  { 1,
		                  "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
		                  "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
		                  "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },

		                  { 2,
		                  "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
		                  "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
		                  "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
		} );
	}

	#[test]
	fn test_per_commitment_secret_gen() {
		// Test vectors from BOLT 3 Appendix D:
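		// build_commitment_secret implements BOLT 3's derive_secret: walk the 48 index
		// bits from most- to least-significant and, for each set bit, flip that bit in
		// the running 32-byte value and SHA256 the result. A minimal sketch of the loop:
		//
		//     let mut res = seed;
		//     for bitpos in (0..48).rev() {
		//         if idx & (1 << bitpos) != 0 {
		//             res[bitpos / 8] ^= 1 << (bitpos & 7);
		//             res = Sha256::hash(&res).to_byte_array();
		//         }
		//     }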
		let mut seed = [0; 32];
		seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
		           <Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);

		seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
		           <Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);

		assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
		           <Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);

		assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
		           <Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);

		seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
		           <Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
	}

	#[test]
	fn test_key_derivation() {
		// Test vectors from BOLT 3 Appendix E:
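		// The derivations under test, per BOLT 3: pubkey = basepoint +
		// SHA256(per_commitment_point || basepoint)*G, with the matching private key
		// derived the same way from basepoint_secret, and revocationpubkey =
		// revocation_basepoint*SHA256(revocation_basepoint || per_commitment_point) +
		// per_commitment_point*SHA256(per_commitment_point || revocation_basepoint).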
		let secp_ctx = Secp256k1::new();

		let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
		let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();

		let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
		assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);

		let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
		assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);

		assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
		           SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());

		assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
		           <Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);

		assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
		           SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
	}

	#[test]
	fn test_zero_conf_channel_type_support() {
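		// Tests that an inbound channel whose `channel_type` requests zero-conf is
		// accepted. A zero-conf channel may be used before its funding transaction
		// confirms, so accepting one implies trusting the counterparty's funding.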
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let logger = test_utils::TestLogger::new();

		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
			node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

		let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
		channel_type_features.set_zero_conf_required();

		let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = Some(channel_type_features);
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
			node_b_node_id, &channelmanager::provided_channel_type_features(&config),
			&channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
		assert!(res.is_ok());
	}

	#[test]
	fn test_supports_anchors_zero_htlc_tx_fee() {
		// Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
		// resulting `channel_type`.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let mut config = UserConfig::default();
		config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;

		// It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both
		// need to signal it.
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
			&config, 0, 42, None
		).unwrap();
		assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());

		let mut expected_channel_type = ChannelTypeFeatures::empty();
		expected_channel_type.set_static_remote_key_required();
		expected_channel_type.set_anchors_zero_fee_htlc_tx_required();

		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		).unwrap();

		assert_eq!(channel_a.context.channel_type, expected_channel_type);
		assert_eq!(channel_b.context.channel_type, expected_channel_type);
	}

	#[test]
	fn test_rejects_implicit_simple_anchors() {
		// Tests that if `option_anchors` is being negotiated implicitly through the intersection of
		// each side's `InitFeatures`, it is rejected.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let config = UserConfig::default();

		// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
		let static_remote_key_required: u64 = 1 << 12;
		let simple_anchors_required: u64 = 1 << 20;
		let raw_init_features = static_remote_key_required | simple_anchors_required;
		let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
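		// (Bits 12 and 20 are the even, "required" bits for `option_static_remotekey`
		// and the original `option_anchors` feature, respectively.)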
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		// Set `channel_type` to `None` to force the implicit feature negotiation.
		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = None;

		// Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
		// `static_remote_key`, it will fail the channel.
		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		);
		assert!(channel_b.is_err());
	}

	#[test]
	fn test_rejects_simple_anchors_channel_type() {
		// Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
		// it is rejected.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let config = UserConfig::default();

		// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
		let static_remote_key_required: u64 = 1 << 12;
		let simple_anchors_required: u64 = 1 << 20;
		let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
		let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
		let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
		assert!(!simple_anchors_init.requires_unknown_bits());
		assert!(!simple_anchors_channel_type.requires_unknown_bits());
		// First, we'll try to open a channel between A and B where A requests a channel type for
		// the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
		// B as it's not supported by LDK.
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

		let res = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		);
		assert!(res.is_err());
		// Then, we'll try to open another channel where A requests a channel type for
		// `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
		// original `option_anchors` feature, which should be rejected by A as it's not supported by
		// LDK.
		let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
			10000000, 100000, 42, &config, 0, 42, None
		).unwrap();

		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));

		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		).unwrap();

		let mut accept_channel_msg = channel_b.get_accept_channel_message();
		accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

		let res = channel_a.accept_channel(
			&accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
		);
		assert!(res.is_err());
	}

	#[test]
	fn test_waiting_for_batch() {
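		// Tests that a channel which is part of a batch funding transaction defers
		// sending channel_ready and broadcasting the funding transaction until the
		// ChannelManager signals that every channel in the batch is ready.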
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = ChainHash::using_genesis_block(network);
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let mut config = UserConfig::default();
		// Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
		// channel in a batch before all channels are ready.
		config.channel_handshake_limits.trust_own_funding_0conf = true;
		// Create a channel from node a to node b that will be part of batch funding.
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
			&feeest,
			&&keys_provider,
			&&keys_provider,
			node_b_node_id,
			&channelmanager::provided_init_features(&config),
			10000000,
			100000,
			42,
			&config,
			0,
			42,
			None
		).unwrap();

		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
			&feeest,
			&&keys_provider,
			&&keys_provider,
			node_b_node_id,
			&channelmanager::provided_channel_type_features(&config),
			&channelmanager::provided_init_features(&config),
			&open_channel_msg,
			7,
			&config,
			0,
			&&logger,
			true,  // Allow node b to send a 0conf channel_ready.
		).unwrap();

		let accept_channel_msg = node_b_chan.accept_inbound_channel();
		node_a_chan.accept_channel(
			&accept_channel_msg,
			&config.channel_handshake_limits,
			&channelmanager::provided_init_features(&config),
		).unwrap();
		// Fund the channel with a batch funding transaction.
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction {
			version: 2,
			lock_time: LockTime::ZERO,
			input: Vec::new(),
			output: vec![
				TxOut {
					value: 10000000, script_pubkey: output_script.clone(),
				},
				// Second output stands in for another channel's funding output in the batch.
				TxOut {
					value: 10000000, script_pubkey: Builder::new().into_script(),
				},
			],
		};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let funding_created_msg = node_a_chan.get_funding_created(
			tx.clone(), funding_outpoint, true, &&logger,
		).map_err(|_| ()).unwrap();
		let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
			&funding_created_msg.unwrap(),
			best_block,
			&&keys_provider,
			&&logger,
		).map_err(|_| ()).unwrap();
		let node_b_updates = node_b_chan.monitor_updating_restored(
			&&logger,
			&&keys_provider,
			chain_hash,
			&config,
			0,
		);

		// Receive funding_signed, but the channel will be configured to hold sending channel_ready and
		// broadcasting the funding transaction until the batch is ready.
		let res = node_a_chan.funding_signed(
			&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger,
		);
		let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
		let node_a_updates = node_a_chan.monitor_updating_restored(
			&&logger,
			&&keys_provider,
			chain_hash,
			&config,
			0,
		);
		// Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
		// as the funding transaction depends on all channels in the batch becoming ready.
		assert!(node_a_updates.channel_ready.is_none());
		assert!(node_a_updates.funding_broadcastable.is_none());
		assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
		// It is possible to receive a 0conf channel_ready from the remote node.
		node_a_chan.channel_ready(
			&node_b_updates.channel_ready.unwrap(),
			&&keys_provider,
			chain_hash,
			&config,
			&best_block,
			&&logger,
		).unwrap();
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH | AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)
		);

		// Clear the ChannelState::WaitingForBatch only when called by ChannelManager.
		node_a_chan.set_batch_ready();
		assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
		assert!(node_a_chan.check_get_channel_ready(0).is_some());
	}
}