1 // This file is Copyright its original authors, visible in version control
2 // history.
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
8 // licenses.
10 use bitcoin::blockdata::constants::ChainHash;
11 use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
12 use bitcoin::blockdata::transaction::Transaction;
14 use bitcoin::sighash::EcdsaSighashType;
15 use bitcoin::consensus::encode;
17 use bitcoin::hashes::Hash;
18 use bitcoin::hashes::sha256::Hash as Sha256;
19 use bitcoin::hashes::sha256d::Hash as Sha256d;
20 use bitcoin::hash_types::{Txid, BlockHash};
22 use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
23 use bitcoin::secp256k1::{PublicKey,SecretKey};
24 use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
25 use bitcoin::secp256k1;
27 use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
28 use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
30 use crate::ln::msgs::DecodeError;
31 use crate::ln::script::{self, ShutdownScript};
32 use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
33 use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
34 use crate::ln::chan_utils;
35 use crate::ln::onion_utils::HTLCFailReason;
36 use crate::chain::BestBlock;
37 use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
38 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
39 use crate::chain::transaction::{OutPoint, TransactionData};
40 use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
41 use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
42 use crate::events::ClosureReason;
43 use crate::routing::gossip::NodeId;
44 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
45 use crate::util::logger::{Logger, Record, WithContext};
46 use crate::util::errors::APIError;
47 use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
48 use crate::util::scid_utils::scid_from_parts;
51 use crate::prelude::*;
52 use core::{cmp,mem,fmt};
53 use core::convert::TryInto;
55 #[cfg(any(test, fuzzing, debug_assertions))]
56 use crate::sync::Mutex;
57 use crate::sign::type_resolver::ChannelSignerType;
59 use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};
62 pub struct ChannelValueStat {
63 pub value_to_self_msat: u64,
64 pub channel_value_msat: u64,
65 pub channel_reserve_msat: u64,
66 pub pending_outbound_htlcs_amount_msat: u64,
67 pub pending_inbound_htlcs_amount_msat: u64,
68 pub holding_cell_outbound_amount_msat: u64,
69 pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
70 pub counterparty_dust_limit_msat: u64,
73 pub struct AvailableBalances {
74 /// The amount that would go to us if we close the channel, ignoring any on-chain fees.
75 pub balance_msat: u64,
76 /// Total amount available for our counterparty to send to us.
77 pub inbound_capacity_msat: u64,
78 /// Total amount available for us to send to our counterparty.
79 pub outbound_capacity_msat: u64,
80 /// The maximum value we can assign to the next outbound HTLC
81 pub next_outbound_htlc_limit_msat: u64,
82 /// The minimum value we can assign to the next outbound HTLC
83 pub next_outbound_htlc_minimum_msat: u64,
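// A brief, hedged sketch (not an LDK API): how a caller might use these fields when
// deciding whether a candidate payment fits within the limits reported for the next
// outbound HTLC. The helper name and signature below are hypothetical.
#[cfg(test)]
fn _example_fits_next_outbound_htlc(balances: &AvailableBalances, amount_msat: u64) -> bool {
    // Hypothetical check: the amount must fall within the reported per-HTLC bounds.
    amount_msat >= balances.next_outbound_htlc_minimum_msat
        && amount_msat <= balances.next_outbound_htlc_limit_msat
}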
86 #[derive(Debug, Clone, Copy, PartialEq)]
88 // Inbound states mirroring InboundHTLCState
90 AwaitingRemoteRevokeToAnnounce,
91 // Note that we do not have an AwaitingAnnouncedRemoteRevoke variant here as it is universally
92 // handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
93 // distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
94 // the fee update anywhere, we can simply consider the fee update `Committed` immediately
95 // instead of setting it to AwaitingAnnouncedRemoteRevoke.
97 // Outbound state can only be `LocalAnnounced` or `Committed`
101 enum InboundHTLCRemovalReason {
102 FailRelay(msgs::OnionErrorPacket),
103 FailMalformed(([u8; 32], u16)),
104 Fulfill(PaymentPreimage),
107 enum InboundHTLCState {
108 /// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
109 /// update_add_htlc message for this HTLC.
110 RemoteAnnounced(PendingHTLCStatus),
111 /// Included in a received commitment_signed message (implying we've
112 /// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
113 /// state (see the example below). We have not yet included this HTLC in a
114 /// commitment_signed message because we are waiting on the remote's
115 /// aforementioned state revocation. One reason this missing remote RAA
116 /// (revoke_and_ack) blocks us from constructing a commitment_signed message
117 /// is because every time we create a new "state", i.e. every time we sign a
118 /// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
119 /// which are provided one-at-a-time in each RAA. E.g., the last RAA they
120 /// sent provided the per_commitment_point for our current commitment tx.
121 /// The other reason we should not send a commitment_signed without their RAA
122 /// is because their RAA serves to ACK our previous commitment_signed.
124 /// Here's an example of how an HTLC could come to be in this state:
125 /// remote --> update_add_htlc(prev_htlc) --> local
126 /// remote --> commitment_signed(prev_htlc) --> local
127 /// remote <-- revoke_and_ack <-- local
128 /// remote <-- commitment_signed(prev_htlc) <-- local
129 /// [note that here, the remote does not respond with a RAA]
130 /// remote --> update_add_htlc(this_htlc) --> local
131 /// remote --> commitment_signed(prev_htlc, this_htlc) --> local
132 /// Now `this_htlc` will be assigned this state. It's unable to be officially
133 /// accepted, i.e. included in a commitment_signed, because we're missing the
134 /// RAA that provides our next per_commitment_point. The per_commitment_point
135 /// is used to derive commitment keys, which are used to construct the
136 /// signatures in a commitment_signed message.
137 /// Implies AwaitingRemoteRevoke.
139 /// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
140 AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
141 /// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
142 /// We have also included this HTLC in our latest commitment_signed and are now just waiting
143 /// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
144 /// channel (before it can then get forwarded and/or removed).
145 /// Implies AwaitingRemoteRevoke.
146 AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
147 Committed,
148 /// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
149 /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
150 /// we'll drop it.
151 /// Note that we have to keep an eye on the HTLC until we've received a broadcastable
152 /// commitment transaction without it as otherwise we'll have to force-close the channel to
153 /// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
154 /// anyway). That said, ChannelMonitor does this for us (see
155 /// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
156 /// our own local state before then, once we're sure that the next commitment_signed and
157 /// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
158 LocalRemoved(InboundHTLCRemovalReason),
161 /// Exposes the state of pending inbound HTLCs.
163 /// At a high level, an HTLC being forwarded from one Lightning node to another Lightning node goes
164 /// through the following states in the state machine:
165 /// - Announced for addition by the originating node through the update_add_htlc message.
166 /// - Added to the commitment transaction of the receiving node and originating node in turn
167 /// through the exchange of commitment_signed and revoke_and_ack messages.
168 /// - Announced for resolution (fulfillment or failure) by the receiving node through either one of
169 /// the update_fulfill_htlc, update_fail_htlc, and update_fail_malformed_htlc messages.
170 /// - Removed from the commitment transaction of the originating node and receiving node in turn
171 /// through the exchange of commitment_signed and revoke_and_ack messages.
173 /// This can be used to inspect which message the HTLC is waiting on next in order to advance its state.
174 #[derive(Clone, Debug, PartialEq)]
175 pub enum InboundHTLCStateDetails {
176 /// We have added this HTLC in our commitment transaction by receiving commitment_signed and
177 /// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
178 /// before this HTLC is included on the remote commitment transaction.
179 AwaitingRemoteRevokeToAdd,
180 /// This HTLC has been included in the commitment_signed and revoke_and_ack messages on both sides
181 /// and is included in both commitment transactions.
183 /// This HTLC is now safe to either forward or be claimed as a payment by us. The HTLC will
184 /// remain in this state until the forwarded upstream HTLC has been resolved and we resolve this
185 /// HTLC correspondingly, or until we claim it as a payment. If it is part of a multipart
186 /// payment, it will only be claimed together with other required parts.
187 Committed,
188 /// We have received the preimage for this HTLC and it is being removed by fulfilling it with
189 /// update_fulfill_htlc. This HTLC is still on both commitment transactions, but we are awaiting
190 /// the appropriate revoke_and_ack's from the remote before this HTLC is removed from the remote
191 /// commitment transaction after update_fulfill_htlc.
192 AwaitingRemoteRevokeToRemoveFulfill,
193 /// The HTLC is being removed by failing it with update_fail_htlc or update_fail_malformed_htlc.
194 /// This HTLC is still on both commitment transactions, but we are awaiting the appropriate
195 /// revoke_and_ack's from the remote before this HTLC is removed from the remote commitment
196 /// transaction after update_fail_htlc.
197 AwaitingRemoteRevokeToRemoveFail,
200 impl From<&InboundHTLCState> for Option<InboundHTLCStateDetails> {
201 fn from(state: &InboundHTLCState) -> Option<InboundHTLCStateDetails> {
203 InboundHTLCState::RemoteAnnounced(_) => None,
204 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) =>
205 Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
206 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) =>
207 Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
208 InboundHTLCState::Committed =>
209 Some(InboundHTLCStateDetails::Committed),
210 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(_)) =>
211 Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail),
212 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(_)) =>
213 Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail),
214 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) =>
215 Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill),
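// Test-only sketch of the mapping above: a fully `Committed` inbound HTLC is surfaced as
// `InboundHTLCStateDetails::Committed`, while a merely-announced `RemoteAnnounced` HTLC
// is not exposed at all. The function name is illustrative only.
#[cfg(test)]
fn _example_inbound_htlc_state_details_mapping() {
    let details: Option<InboundHTLCStateDetails> = (&InboundHTLCState::Committed).into();
    assert_eq!(details, Some(InboundHTLCStateDetails::Committed));
}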
220 impl_writeable_tlv_based_enum_upgradable!(InboundHTLCStateDetails,
221 (0, AwaitingRemoteRevokeToAdd) => {},
222 (2, Committed) => {},
223 (4, AwaitingRemoteRevokeToRemoveFulfill) => {},
224 (6, AwaitingRemoteRevokeToRemoveFail) => {};
227 struct InboundHTLCOutput {
231 payment_hash: PaymentHash,
232 state: InboundHTLCState,
235 /// Exposes details around pending inbound HTLCs.
236 #[derive(Clone, Debug, PartialEq)]
237 pub struct InboundHTLCDetails {
239 /// The IDs are incremented by 1 starting from 0 for each offered HTLC.
240 /// They are unique per channel and inbound/outbound direction, unless an HTLC was only announced
241 /// and not part of any commitment transaction.
242 pub htlc_id: u64,
243 /// The amount in msat.
244 pub amount_msat: u64,
245 /// The block height at which this HTLC expires.
246 pub cltv_expiry: u32,
247 /// The payment hash.
248 pub payment_hash: PaymentHash,
249 /// The state of the HTLC in the state machine.
251 /// Determines on which commitment transactions the HTLC is included and what message the HTLC is
252 /// waiting for to advance to the next state.
254 /// See [`InboundHTLCStateDetails`] for information on the specific states.
256 /// LDK will always fill this field in, but when downgrading to prior versions of LDK, new
257 /// states may result in `None` here.
258 pub state: Option<InboundHTLCStateDetails>,
259 /// Whether the HTLC has an output below the local dust limit. If so, the output will be trimmed
260 /// from the local commitment transaction and added to the commitment transaction fee.
261 /// For non-anchor channels, this takes into account the cost of the second-stage HTLC
262 /// transactions as well.
264 /// When the local commitment transaction is broadcasted as part of a unilateral closure,
265 /// the value of this HTLC will therefore not be claimable but instead burned as a transaction
266 /// fee.
268 /// Note that dust limits are specific to each party. An HTLC can be dust for the local
269 /// commitment transaction but not for the counterparty's commitment transaction and vice versa.
270 pub is_dust: bool,
273 impl_writeable_tlv_based!(InboundHTLCDetails, {
274 (0, htlc_id, required),
275 (2, amount_msat, required),
276 (4, cltv_expiry, required),
277 (6, payment_hash, required),
278 (7, state, upgradable_option),
279 (8, is_dust, required),
282 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
283 enum OutboundHTLCState {
284 /// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
285 /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
286 /// we will promote to Committed (note that they may not accept it until the next time we
287 /// revoke, but we don't really care about that:
288 /// * they've revoked, so worst case we can announce an old state and get our (option on)
289 /// money back (though we won't), and,
290 /// * we'll send them a revoke when they send a commitment_signed, and since only they're
291 /// allowed to remove it, the "can only be removed once committed on both sides" requirement
292 /// doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
293 /// we'll never get out of sync).
294 /// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
295 /// OutboundHTLCOutput's size just for a temporary bit
296 LocalAnnounced(Box<msgs::OnionPacket>),
297 Committed,
298 /// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
299 /// the change (though they'll need to revoke before we fail the payment).
300 RemoteRemoved(OutboundHTLCOutcome),
301 /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
302 /// the remote side hasn't yet revoked their previous state, which we need them to do before we
303 /// can do any backwards failing. Implies AwaitingRemoteRevoke.
304 /// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
305 /// remote revoke_and_ack on a previous state before we can do so.
306 AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
307 /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
308 /// the remote side hasn't yet revoked their previous state, which we need them to do before we
309 /// can do any backwards failing. Implies AwaitingRemoteRevoke.
310 /// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
311 /// revoke_and_ack to drop completely.
312 AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
315 /// Exposes the state of pending outbound HTLCs.
317 /// At a high level, an HTLC being forwarded from one Lightning node to another Lightning node goes
318 /// through the following states in the state machine:
319 /// - Announced for addition by the originating node through the update_add_htlc message.
320 /// - Added to the commitment transaction of the receiving node and originating node in turn
321 /// through the exchange of commitment_signed and revoke_and_ack messages.
322 /// - Announced for resolution (fulfillment or failure) by the receiving node through either one of
323 /// the update_fulfill_htlc, update_fail_htlc, and update_fail_malformed_htlc messages.
324 /// - Removed from the commitment transaction of the originating node and receiving node in turn
325 /// through the exchange of commitment_signed and revoke_and_ack messages.
327 /// This can be used to inspect which message the HTLC is waiting on next in order to advance its state.
328 #[derive(Clone, Debug, PartialEq)]
329 pub enum OutboundHTLCStateDetails {
330 /// We are awaiting the appropriate revoke_and_ack's from the remote before the HTLC is added
331 /// on the remote's commitment transaction after update_add_htlc.
332 AwaitingRemoteRevokeToAdd,
333 /// The HTLC has been added to the remote's commitment transaction by sending commitment_signed
334 /// and receiving revoke_and_ack in return.
336 /// The HTLC will remain in this state until the remote node resolves the HTLC, or until we
337 /// unilaterally close the channel due to a timeout with an uncooperative remote node.
338 Committed,
339 /// The HTLC has been fulfilled successfully by the remote with a preimage in update_fulfill_htlc,
340 /// and we removed the HTLC from our commitment transaction by receiving commitment_signed and
341 /// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
342 /// for the removal from its commitment transaction.
343 AwaitingRemoteRevokeToRemoveSuccess,
344 /// The HTLC has been failed by the remote with update_fail_htlc or update_fail_malformed_htlc,
345 /// and we removed the HTLC from our commitment transaction by receiving commitment_signed and
346 /// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
347 /// for the removal from its commitment transaction.
348 AwaitingRemoteRevokeToRemoveFailure,
351 impl From<&OutboundHTLCState> for OutboundHTLCStateDetails {
352 fn from(state: &OutboundHTLCState) -> OutboundHTLCStateDetails {
354 OutboundHTLCState::LocalAnnounced(_) =>
355 OutboundHTLCStateDetails::AwaitingRemoteRevokeToAdd,
356 OutboundHTLCState::Committed =>
357 OutboundHTLCStateDetails::Committed,
358 // RemoteRemoved states are ignored as the state is transient and the remote has not committed to
359 // the removal yet.
360 OutboundHTLCState::RemoteRemoved(_) =>
361 OutboundHTLCStateDetails::Committed,
362 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) =>
363 OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess,
364 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Failure(_)) =>
365 OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFailure,
366 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) =>
367 OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess,
368 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Failure(_)) =>
369 OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFailure,
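// Test-only sketch of the mapping above: a `RemoteRemoved` HTLC is still reported as
// `Committed` because, as noted in the comment, the removal is not yet committed on any
// commitment transaction. The function name is illustrative only.
#[cfg(test)]
fn _example_outbound_htlc_state_details_mapping() {
    let state = OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(None));
    assert_eq!(OutboundHTLCStateDetails::from(&state), OutboundHTLCStateDetails::Committed);
}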
374 impl_writeable_tlv_based_enum_upgradable!(OutboundHTLCStateDetails,
375 (0, AwaitingRemoteRevokeToAdd) => {},
376 (2, Committed) => {},
377 (4, AwaitingRemoteRevokeToRemoveSuccess) => {},
378 (6, AwaitingRemoteRevokeToRemoveFailure) => {};
382 #[cfg_attr(test, derive(Debug, PartialEq))]
383 enum OutboundHTLCOutcome {
384 /// LDK version 0.0.105+ will always fill in the preimage here.
385 Success(Option<PaymentPreimage>),
386 Failure(HTLCFailReason),
389 impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
390 fn from(o: Option<HTLCFailReason>) -> Self {
392 None => OutboundHTLCOutcome::Success(None),
393 Some(r) => OutboundHTLCOutcome::Failure(r)
398 impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
399 fn into(self) -> Option<&'a HTLCFailReason> {
401 OutboundHTLCOutcome::Success(_) => None,
402 OutboundHTLCOutcome::Failure(ref r) => Some(r)
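// Small test-only sketch of the conversion above: a successful outcome carries no
// failure reason to hand back to the HTLC source.
#[cfg(test)]
fn _example_outcome_into_fail_reason() {
    let outcome = OutboundHTLCOutcome::Success(None);
    let reason: Option<&HTLCFailReason> = (&outcome).into();
    assert!(reason.is_none());
}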
407 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
408 struct OutboundHTLCOutput {
412 payment_hash: PaymentHash,
413 state: OutboundHTLCState,
415 blinding_point: Option<PublicKey>,
416 skimmed_fee_msat: Option<u64>,
419 /// Exposes details around pending outbound HTLCs.
420 #[derive(Clone, Debug, PartialEq)]
421 pub struct OutboundHTLCDetails {
423 /// The IDs are incremented by 1 starting from 0 for each offered HTLC.
424 /// They are unique per channel and inbound/outbound direction, unless an HTLC was only announced
425 /// and not part of any commitment transaction.
427 /// Not present when we are awaiting a remote revocation and the HTLC is not added yet.
428 pub htlc_id: Option<u64>,
429 /// The amount in msat.
430 pub amount_msat: u64,
431 /// The block height at which this HTLC expires.
432 pub cltv_expiry: u32,
433 /// The payment hash.
434 pub payment_hash: PaymentHash,
435 /// The state of the HTLC in the state machine.
437 /// Determines on which commitment transactions the HTLC is included and what message the HTLC is
438 /// waiting for to advance to the next state.
440 /// See [`OutboundHTLCStateDetails`] for information on the specific states.
442 /// LDK will always fill this field in, but when downgrading to prior versions of LDK, new
443 /// states may result in `None` here.
444 pub state: Option<OutboundHTLCStateDetails>,
445 /// The extra fee being skimmed off the top of this HTLC.
446 pub skimmed_fee_msat: Option<u64>,
447 /// Whether the HTLC has an output below the local dust limit. If so, the output will be trimmed
448 /// from the local commitment transaction and added to the commitment transaction fee.
449 /// For non-anchor channels, this takes into account the cost of the second-stage HTLC
450 /// transactions as well.
452 /// When the local commitment transaction is broadcasted as part of a unilateral closure,
453 /// the value of this HTLC will therefore not be claimable but instead burned as a transaction
454 /// fee.
456 /// Note that dust limits are specific to each party. An HTLC can be dust for the local
457 /// commitment transaction but not for the counterparty's commitment transaction and vice versa.
458 pub is_dust: bool,
461 impl_writeable_tlv_based!(OutboundHTLCDetails, {
462 (0, htlc_id, required),
463 (2, amount_msat, required),
464 (4, cltv_expiry, required),
465 (6, payment_hash, required),
466 (7, state, upgradable_option),
467 (8, skimmed_fee_msat, required),
468 (10, is_dust, required),
471 /// See AwaitingRemoteRevoke ChannelState for more info
472 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
473 enum HTLCUpdateAwaitingACK {
474 AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
478 payment_hash: PaymentHash,
480 onion_routing_packet: msgs::OnionPacket,
481 // The extra fee we're skimming off the top of this HTLC.
482 skimmed_fee_msat: Option<u64>,
483 blinding_point: Option<PublicKey>,
486 payment_preimage: PaymentPreimage,
491 err_packet: msgs::OnionErrorPacket,
496 sha256_of_onion: [u8; 32],
500 macro_rules! define_state_flags {
501 ($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr, $get: ident, $set: ident, $clear: ident)),+], $extra_flags: expr) => {
502 #[doc = $flag_type_doc]
503 #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
504 struct $flag_type(u32);
509 const $flag: $flag_type = $flag_type($value);
512 /// All flags that apply to the specified [`ChannelState`] variant.
514 const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags);
517 fn new() -> Self { Self(0) }
520 fn from_u32(flags: u32) -> Result<Self, ()> {
521 if flags & !Self::ALL.0 != 0 {
524 Ok($flag_type(flags))
529 fn is_empty(&self) -> bool { self.0 == 0 }
531 fn is_set(&self, flag: Self) -> bool { *self & flag == flag }
533 fn set(&mut self, flag: Self) { *self |= flag }
535 fn clear(&mut self, flag: Self) -> Self { self.0 &= !flag.0; *self }
539 define_state_flags!($flag_type, Self::$flag, $get, $set, $clear);
542 impl core::ops::BitOr for $flag_type {
544 fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
546 impl core::ops::BitOrAssign for $flag_type {
547 fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; }
549 impl core::ops::BitAnd for $flag_type {
551 fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) }
553 impl core::ops::BitAndAssign for $flag_type {
554 fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; }
557 ($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
558 define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
560 ($flag_type: ident, $flag: expr, $get: ident, $set: ident, $clear: ident) => {
563 fn $get(&self) -> bool { self.is_set($flag_type::new() | $flag) }
565 fn $set(&mut self) { self.set($flag_type::new() | $flag) }
567 fn $clear(&mut self) -> Self { self.clear($flag_type::new() | $flag) }
570 ($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
571 define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);
573 define_state_flags!($flag_type, FundedStateFlags::PEER_DISCONNECTED,
574 is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected);
575 define_state_flags!($flag_type, FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS,
576 is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress);
577 define_state_flags!($flag_type, FundedStateFlags::REMOTE_SHUTDOWN_SENT,
578 is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent);
579 define_state_flags!($flag_type, FundedStateFlags::LOCAL_SHUTDOWN_SENT,
580 is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent);
582 impl core::ops::BitOr<FundedStateFlags> for $flag_type {
584 fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
586 impl core::ops::BitOrAssign<FundedStateFlags> for $flag_type {
587 fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; }
589 impl core::ops::BitAnd<FundedStateFlags> for $flag_type {
591 fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) }
593 impl core::ops::BitAndAssign<FundedStateFlags> for $flag_type {
594 fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; }
596 impl PartialEq<FundedStateFlags> for $flag_type {
597 fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 }
599 impl From<FundedStateFlags> for $flag_type {
600 fn from(flags: FundedStateFlags) -> Self { Self(flags.0) }
605 /// We declare all the states/flags here together to help determine which bits are still available
608 pub const OUR_INIT_SENT: u32 = 1 << 0;
609 pub const THEIR_INIT_SENT: u32 = 1 << 1;
610 pub const FUNDING_NEGOTIATED: u32 = 1 << 2;
611 pub const AWAITING_CHANNEL_READY: u32 = 1 << 3;
612 pub const THEIR_CHANNEL_READY: u32 = 1 << 4;
613 pub const OUR_CHANNEL_READY: u32 = 1 << 5;
614 pub const CHANNEL_READY: u32 = 1 << 6;
615 pub const PEER_DISCONNECTED: u32 = 1 << 7;
616 pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8;
617 pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9;
618 pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10;
619 pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11;
620 pub const SHUTDOWN_COMPLETE: u32 = 1 << 12;
621 pub const WAITING_FOR_BATCH: u32 = 1 << 13;
625 "Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
627 ("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
628 until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED,
629 is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected),
630 ("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
631 somewhere and we should pause sending any outbound messages until they've managed to \
632 complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS,
633 is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress),
634 ("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
635 any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
636 message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT,
637 is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent),
638 ("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
639 the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT,
640 is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent)
645 "Flags that only apply to [`ChannelState::NegotiatingFunding`].",
646 NegotiatingFundingFlags, [
647 ("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
648 OUR_INIT_SENT, state_flags::OUR_INIT_SENT, is_our_init_sent, set_our_init_sent, clear_our_init_sent),
649 ("Indicates we have received their `open_channel`/`accept_channel` message.",
650 THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT, is_their_init_sent, set_their_init_sent, clear_their_init_sent)
655 "Flags that only apply to [`ChannelState::AwaitingChannelReady`].",
656 FUNDED_STATE, AwaitingChannelReadyFlags, [
657 ("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
658 `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
659 THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY,
660 is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready),
661 ("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
662 `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
663 OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY,
664 is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready),
665 ("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
666 is being held until all channels in the batch have received `funding_signed` and have \
667 their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH,
668 is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch)
673 "Flags that only apply to [`ChannelState::ChannelReady`].",
674 FUNDED_STATE, ChannelReadyFlags, [
675 ("Indicates that we have sent a `commitment_signed` but are awaiting the responding \
676 `revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
677 messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
678 implicit ACK, so instead we have to hold them away temporarily to be sent later.",
679 AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE,
680 is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke)
684 // Note that the order of this enum is implicitly defined by where each variant is placed. Take this
685 // into account when introducing new states and update `test_channel_state_order` accordingly.
686 #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
688 /// We are negotiating the parameters required for the channel prior to funding it.
689 NegotiatingFunding(NegotiatingFundingFlags),
690 /// We have sent `funding_created` and are awaiting a `funding_signed` to advance to
691 /// `AwaitingChannelReady`. Note that this is nonsense for an inbound channel as we immediately generate
692 /// `funding_signed` upon receipt of `funding_created`, so simply skip this state.
693 FundingNegotiated,
694 /// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the
695 /// funding transaction to confirm.
696 AwaitingChannelReady(AwaitingChannelReadyFlags),
697 /// Both we and our counterparty consider the funding transaction confirmed and the channel is
698 /// now operational.
699 ChannelReady(ChannelReadyFlags),
700 /// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager`
701 /// is about to drop us, but we store this anyway.
702 ShutdownComplete,
705 macro_rules! impl_state_flag {
706 ($get: ident, $set: ident, $clear: ident, [$($state: ident),+]) => {
708 fn $get(&self) -> bool {
711 ChannelState::$state(flags) => flags.$get(),
720 ChannelState::$state(flags) => flags.$set(),
722 _ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
726 fn $clear(&mut self) {
729 ChannelState::$state(flags) => { let _ = flags.$clear(); },
731 _ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
735 ($get: ident, $set: ident, $clear: ident, FUNDED_STATES) => {
736 impl_state_flag!($get, $set, $clear, [AwaitingChannelReady, ChannelReady]);
738 ($get: ident, $set: ident, $clear: ident, $state: ident) => {
739 impl_state_flag!($get, $set, $clear, [$state]);
744 fn from_u32(state: u32) -> Result<Self, ()> {
746 state_flags::FUNDING_NEGOTIATED => Ok(ChannelState::FundingNegotiated),
747 state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete),
749 if val & state_flags::AWAITING_CHANNEL_READY == state_flags::AWAITING_CHANNEL_READY {
750 AwaitingChannelReadyFlags::from_u32(val & !state_flags::AWAITING_CHANNEL_READY)
751 .map(|flags| ChannelState::AwaitingChannelReady(flags))
752 } else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY {
753 ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY)
754 .map(|flags| ChannelState::ChannelReady(flags))
755 } else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) {
756 Ok(ChannelState::NegotiatingFunding(flags))
764 fn to_u32(&self) -> u32 {
766 ChannelState::NegotiatingFunding(flags) => flags.0,
767 ChannelState::FundingNegotiated => state_flags::FUNDING_NEGOTIATED,
768 ChannelState::AwaitingChannelReady(flags) => state_flags::AWAITING_CHANNEL_READY | flags.0,
769 ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0,
770 ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE,
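// Test-only sketch: `to_u32`/`from_u32` round-trip through the packed `u32` form built
// from the `state_flags` constants, used when reading and writing the state. The
// function name is illustrative only.
#[cfg(test)]
fn _example_channel_state_u32_roundtrip() {
    let state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::OUR_CHANNEL_READY);
    assert_eq!(ChannelState::from_u32(state.to_u32()), Ok(state));
}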
774 fn is_pre_funded_state(&self) -> bool {
775 matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingNegotiated)
778 fn is_both_sides_shutdown(&self) -> bool {
779 self.is_local_shutdown_sent() && self.is_remote_shutdown_sent()
782 fn with_funded_state_flags_mask(&self) -> FundedStateFlags {
784 ChannelState::AwaitingChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
785 ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
786 _ => FundedStateFlags::new(),
790 fn can_generate_new_commitment(&self) -> bool {
792 ChannelState::ChannelReady(flags) =>
793 !flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) &&
794 !flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) &&
795 !flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
797 debug_assert!(false, "Can only generate new commitment within ChannelReady");
803 impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected, FUNDED_STATES);
804 impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress, FUNDED_STATES);
805 impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent, FUNDED_STATES);
806 impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent, FUNDED_STATES);
807 impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready, AwaitingChannelReady);
808 impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready, AwaitingChannelReady);
809 impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch, AwaitingChannelReady);
810 impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke, ChannelReady);
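// Test-only sketch of how the generated helpers compose: with no flags set on
// `ChannelReady` we may build a new commitment, but once `AWAITING_REMOTE_REVOKE` is set
// (an outstanding `commitment_signed`) we must wait for the counterparty's
// `revoke_and_ack` first. The function name is illustrative only.
#[cfg(test)]
fn _example_channel_ready_flag_helpers() {
    let mut state = ChannelState::ChannelReady(ChannelReadyFlags::new());
    assert!(state.can_generate_new_commitment());
    state.set_awaiting_remote_revoke();
    assert!(state.is_awaiting_remote_revoke());
    assert!(!state.can_generate_new_commitment());
    state.clear_awaiting_remote_revoke();
    assert!(state.can_generate_new_commitment());
}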
813 pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
815 pub const DEFAULT_MAX_HTLCS: u16 = 50;
817 pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
818 const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
819 const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
820 if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
823 #[cfg(not(test))]
824 const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
825 #[cfg(test)]
826 pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
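// Hedged sketch of the arithmetic these constants feed into: the expected commitment
// transaction weight is the base weight plus `COMMITMENT_TX_WEIGHT_PER_HTLC` per
// non-dust HTLC, and the fee is that weight times the feerate (expressed per 1000
// weight units). The helper below is hypothetical and only illustrates the formula.
#[cfg(test)]
fn _example_commitment_tx_fee_sat(
    feerate_per_kw: u32, num_nondust_htlcs: usize, channel_type_features: &ChannelTypeFeatures,
) -> u64 {
    let weight = commitment_tx_base_weight(channel_type_features)
        + num_nondust_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC;
    feerate_per_kw as u64 * weight / 1000
}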
828 pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
830 /// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
831 /// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
832 /// although LDK 0.0.104+ enabled serialization of channels with a different value set for
833 /// `holder_max_htlc_value_in_flight_msat`.
834 pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;
836 /// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
837 /// `option_support_large_channel` (aka wumbo channels) is not supported.
839 pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;
841 /// Total bitcoin supply in satoshis.
842 pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;
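// Quick sanity sketch: without wumbo support the funding cap is 2^24 - 1 = 16,777,215
// sats (a bit under 0.168 BTC), far below the total-supply constant above.
#[cfg(test)]
fn _example_funding_caps() {
    assert_eq!(MAX_FUNDING_SATOSHIS_NO_WUMBO, 16_777_215);
    assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO < TOTAL_BITCOIN_SUPPLY_SATOSHIS);
}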
844 /// The maximum network dust limit for standard script formats. This currently represents the
845 /// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
846 /// transaction non-standard and thus refuses to relay it.
847 /// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
848 /// implementations use this value for their dust limit today.
849 pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;
851 /// The maximum channel dust limit we will accept from our counterparty.
852 pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;
854 /// The dust limit is used for both the commitment transaction outputs as well as the closing
855 /// transactions. For cooperative closing transactions, we require segwit outputs, though accept
856 /// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
857 /// In order to avoid having to concern ourselves with standardness during the closing process, we
858 /// simply require our counterparty to use a dust limit which will leave any segwit output
859 /// standard.
860 /// See <https://github.com/lightning/bolts/issues/905> for more details.
861 pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
863 // Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
864 pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
866 /// Used to return a simple Error back to ChannelManager. Will get converted to a
867 /// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
868 /// channel_id in ChannelManager.
869 pub(super) enum ChannelError {
870 Ignore(String),
871 Warn(String),
872 Close(String),
875 impl fmt::Debug for ChannelError {
876 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
878 &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
879 &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
880 &ChannelError::Close(ref e) => write!(f, "Close : {}", e),
885 impl fmt::Display for ChannelError {
886 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
888 &ChannelError::Ignore(ref e) => write!(f, "{}", e),
889 &ChannelError::Warn(ref e) => write!(f, "{}", e),
890 &ChannelError::Close(ref e) => write!(f, "{}", e),
895 pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
896 pub logger: &'a L,
897 pub peer_id: Option<PublicKey>,
898 pub channel_id: Option<ChannelId>,
901 impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
902 fn log(&self, mut record: Record) {
903 record.peer_id = self.peer_id;
904 record.channel_id = self.channel_id;
905 self.logger.log(record)
909 impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
910 where L::Target: Logger {
911 pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
912 where S::Target: SignerProvider
916 peer_id: Some(context.counterparty_node_id),
917 channel_id: Some(context.channel_id),
922 macro_rules! secp_check {
923 ($res: expr, $err: expr) => {
926 Err(_) => return Err(ChannelError::Close($err)),
931 /// The "channel disabled" bit in channel_update must be set based on whether we are connected to
932 /// our counterparty or not. However, we don't want to announce updates right away to avoid
933 /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
934 /// our channel_update message and track the current state here.
935 /// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
936 #[derive(Clone, Copy, PartialEq)]
937 pub(super) enum ChannelUpdateStatus {
938 /// We've announced the channel as enabled and are connected to our peer.
940 /// Our channel is no longer live, but we haven't announced the channel as disabled yet.
942 /// Our channel is live again, but we haven't announced the channel as enabled yet.
944 /// We've announced the channel as disabled.
948 /// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
950 pub enum AnnouncementSigsState {
951 /// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
952 /// we sent the last `AnnouncementSignatures`.
954 /// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
955 /// This state never appears on disk - instead we write `NotSent`.
957 /// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
958 /// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
959 /// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
960 /// they send back a `RevokeAndACK`.
961 /// This state never appears on disk - instead we write `NotSent`.
963 /// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
964 /// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
968 /// An enum indicating whether the local or remote side offered a given HTLC.
974 /// A struct gathering stats on pending HTLCs, either inbound or outbound side.
977 pending_htlcs_value_msat: u64,
978 on_counterparty_tx_dust_exposure_msat: u64,
979 on_holder_tx_dust_exposure_msat: u64,
980 holding_cell_msat: u64,
981 on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
984 /// A struct gathering stats on a commitment transaction, either local or remote.
985 struct CommitmentStats<'a> {
986 tx: CommitmentTransaction, // the transaction info
987 feerate_per_kw: u32, // the feerate included to build the transaction
988 total_fee_sat: u64, // the total fee included in the transaction
989 num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
990 htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
991 local_balance_msat: u64, // local balance before fees *not* considering dust limits
992 remote_balance_msat: u64, // remote balance before fees *not* considering dust limits
993 outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
994 inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
997 /// Used when calculating whether we or the remote can afford an additional HTLC.
998 struct HTLCCandidate {
999 amount_msat: u64,
1000 origin: HTLCInitiator,
1003 impl HTLCCandidate {
1004 fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
1012 /// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
1013 /// a description of each case.
1014 enum UpdateFulfillFetch {
1016 monitor_update: ChannelMonitorUpdate,
1017 htlc_value_msat: u64,
1018 msg: Option<msgs::UpdateFulfillHTLC>,
1023 /// The return type of get_update_fulfill_htlc_and_commit.
1024 pub enum UpdateFulfillCommitFetch {
1025 /// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
1026 /// it in the holding cell, or re-generated the update_fulfill message after the same claim was
1027 /// previously placed in the holding cell (and has since been removed).
1029 /// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
1030 monitor_update: ChannelMonitorUpdate,
1031 /// The value of the HTLC which was claimed, in msat.
1032 htlc_value_msat: u64,
1034 /// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
1035 /// or has been forgotten (presumably previously claimed).
1039 /// The return value of `monitor_updating_restored`
1040 pub(super) struct MonitorRestoreUpdates {
1041 pub raa: Option<msgs::RevokeAndACK>,
1042 pub commitment_update: Option<msgs::CommitmentUpdate>,
1043 pub order: RAACommitmentOrder,
1044 pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
1045 pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
1046 pub finalized_claimed_htlcs: Vec<HTLCSource>,
1047 pub funding_broadcastable: Option<Transaction>,
1048 pub channel_ready: Option<msgs::ChannelReady>,
1049 pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
1052 /// The return value of `signer_maybe_unblocked`
1054 pub(super) struct SignerResumeUpdates {
1055 pub commitment_update: Option<msgs::CommitmentUpdate>,
1056 pub funding_signed: Option<msgs::FundingSigned>,
1057 pub channel_ready: Option<msgs::ChannelReady>,
1060 /// The return value of `channel_reestablish`
1061 pub(super) struct ReestablishResponses {
1062 pub channel_ready: Option<msgs::ChannelReady>,
1063 pub raa: Option<msgs::RevokeAndACK>,
1064 pub commitment_update: Option<msgs::CommitmentUpdate>,
1065 pub order: RAACommitmentOrder,
1066 pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
1067 pub shutdown_msg: Option<msgs::Shutdown>,
1070 /// The result of a shutdown that should be handled.
1072 pub(crate) struct ShutdownResult {
1073 pub(crate) closure_reason: ClosureReason,
1074 /// A channel monitor update to apply.
1075 pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelId, ChannelMonitorUpdate)>,
1076 /// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
1077 pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
1078 /// An unbroadcasted batch funding transaction id. The closure of this channel should be
1079 /// propagated to the remainder of the batch.
1080 pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
1081 pub(crate) channel_id: ChannelId,
1082 pub(crate) user_channel_id: u128,
1083 pub(crate) channel_capacity_satoshis: u64,
1084 pub(crate) counterparty_node_id: PublicKey,
1085 pub(crate) unbroadcasted_funding_tx: Option<Transaction>,
1086 pub(crate) channel_funding_txo: Option<OutPoint>,
1089 /// If the majority of the channel's funds are to the fundee and the initiator holds only just
1090 /// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
1091 /// initiator controls the feerate, if they then go to increase the channel fee, they may have no
1092 /// balance but the fundee is unable to send a payment as the increase in fee more than drains
1093 /// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
1094 /// Thus, when we are the initiator, before sending an HTLC we check that the feerate can
1095 /// increase by this multiple without hitting this case.
1096 /// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
1097 /// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
1098 /// HTLCs for days we may need this to suffice for feerate increases across days, but that may
1099 /// leave the channel less usable as we hold a bigger reserve.
1100 #[cfg(any(fuzzing, test))]
1101 pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
1102 #[cfg(not(any(fuzzing, test)))]
1103 const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
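// Illustrative sketch of the buffer described above: when we are the funder, affordability
// of a new outbound HTLC is judged as if the feerate had already jumped by this multiple,
// e.g. a channel currently at 2500 sat/kW is budgeted at 5000 sat/kW.
#[cfg(test)]
fn _example_fee_spike_buffered_feerate() {
    let current_feerate_per_kw: u64 = 2500;
    assert_eq!(current_feerate_per_kw * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, 5000);
}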
1105 /// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
1106 /// channel creation on an inbound channel, we simply force-close and move on.
1107 /// This constant is the one suggested in BOLT 2.
1108 pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;
1110 /// In case of a concurrent update_add_htlc proposed by our counterparty, we might
1111 /// not have enough balance value remaining to cover the onchain cost of this new
1112 /// HTLC weight. If this happens, our counterparty fails the reception of our
1113 /// commitment_signed including this new HTLC due to infringement on the channel
1114 /// reserve.
1115 /// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
1116 /// size 2. However, if the number of concurrent update_add_htlc is higher, this still
1117 /// leads to a channel force-close. Ultimately, this is an issue coming from the
1118 /// design of LN state machines, allowing asynchronous updates.
1119 pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
1121 /// When a channel is opened, we check that the funding amount is enough to pay for relevant
1122 /// commitment transaction fees, with at least this many HTLCs present on the commitment
1123 /// transaction (not counting the value of the HTLCs themselves).
1124 pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
1126 /// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
1127 /// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
1128 /// ChannelUpdate prompted by the config update. This value was determined as follows:
1130 /// * The expected interval between ticks (1 minute).
1131 /// * The average convergence delay of updates across the network, i.e., ~300 seconds on average
1132 /// for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
1133 /// * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
1134 pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
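// The arithmetic behind the value: roughly 300 seconds of expected network-wide gossip
// convergence divided by the ~60 second tick interval gives 5 ticks.
#[cfg(test)]
fn _example_expire_prev_config_ticks_derivation() {
    let convergence_delay_secs: usize = 300;
    let tick_interval_secs: usize = 60;
    assert_eq!(EXPIRE_PREV_CONFIG_TICKS, convergence_delay_secs / tick_interval_secs);
}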
1136 /// The number of ticks that may elapse while we're waiting for a response to a
1137 /// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
1138 /// them.
1140 /// See [`ChannelContext::sent_message_awaiting_response`] for more information.
1141 pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
1143 /// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
1144 /// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
1145 /// exceeding this age limit will be force-closed and purged from memory.
1146 pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;
1148 /// Number of blocks needed for an output from a coinbase transaction to be spendable.
1149 pub(crate) const COINBASE_MATURITY: u32 = 100;
1151 struct PendingChannelMonitorUpdate {
1152 update: ChannelMonitorUpdate,
1155 impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
1156 (0, update, required),
1159 /// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
1160 /// its variants containing an appropriate channel struct.
1161 pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
1162 UnfundedOutboundV1(OutboundV1Channel<SP>),
1163 UnfundedInboundV1(InboundV1Channel<SP>),
1164 Funded(Channel<SP>),
1167 impl<'a, SP: Deref> ChannelPhase<SP> where
1168 SP::Target: SignerProvider,
1169 <SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
1171 pub fn context(&'a self) -> &'a ChannelContext<SP> {
1173 ChannelPhase::Funded(chan) => &chan.context,
1174 ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
1175 ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
1179 pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
1181 ChannelPhase::Funded(ref mut chan) => &mut chan.context,
1182 ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
1183 ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
1188 /// Contains all state common to unfunded inbound/outbound channels.
1189 pub(super) struct UnfundedChannelContext {
1190 /// A counter tracking how many ticks have elapsed since this unfunded channel was
1191 /// created. If the peer has yet to respond after this counter reaches
1192 /// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, the channel will be force-closed and purged from memory.
1194 /// This is so that we don't keep channels around that haven't progressed to a funded state
1195 /// in a timely manner.
1196 unfunded_channel_age_ticks: usize,
1199 impl UnfundedChannelContext {
1200 /// Determines whether we should force-close and purge this unfunded channel from memory due to it
1201 /// having reached the unfunded channel age limit.
1203 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
1204 pub fn should_expire_unfunded_channel(&mut self) -> bool {
1205 self.unfunded_channel_age_ticks += 1;
1206 self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
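// Test-only sketch of the expiry logic above, assuming `unfunded_channel_age_ticks` is the
// only field of `UnfundedChannelContext` (as declared above): the first
// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1` ticks keep the channel alive, the next tick
// expires it. The function name is illustrative only.
#[cfg(test)]
fn _example_unfunded_channel_expiry() {
    // Assumes the struct has exactly the one counter field shown above.
    let mut ctx = UnfundedChannelContext { unfunded_channel_age_ticks: 0 };
    for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
        assert!(!ctx.should_expire_unfunded_channel());
    }
    assert!(ctx.should_expire_unfunded_channel());
}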
1210 /// Contains everything about the channel, including its state and various flags.
1211 pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
1212 config: LegacyChannelConfig,
1214 // Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
1215 // constructed using it. The second element in the tuple corresponds to the number of ticks that
1216 // have elapsed since the update occurred.
1217 prev_config: Option<(ChannelConfig, usize)>,
1219 inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,
1223 /// The current channel ID.
1224 channel_id: ChannelId,
1225 /// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
1226 /// Will be `None` for channels created prior to 0.0.115.
1227 temporary_channel_id: Option<ChannelId>,
1228 channel_state: ChannelState,
1230 // When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
1231 // our peer. However, we want to make sure they received it, or else rebroadcast it when we
1233 // We do so here, see `AnnouncementSigsSent` for more details on the state(s).
1234 // Note that a number of our tests were written prior to the behavior here which retransmits
1235 // AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
1237 #[cfg(any(test, feature = "_test_utils"))]
1238 pub(crate) announcement_sigs_state: AnnouncementSigsState,
1239 #[cfg(not(any(test, feature = "_test_utils")))]
1240 announcement_sigs_state: AnnouncementSigsState,
1242 secp_ctx: Secp256k1<secp256k1::All>,
1243 channel_value_satoshis: u64,
1245 latest_monitor_update_id: u64,
1247 holder_signer: ChannelSignerType<SP>,
1248 shutdown_scriptpubkey: Option<ShutdownScript>,
1249 destination_script: ScriptBuf,
1251 // Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
1252 // generation start at 0 and count up...this simplifies some parts of implementation at the
1253 // cost of others, but should really just be changed.
1255 cur_holder_commitment_transaction_number: u64,
1256 cur_counterparty_commitment_transaction_number: u64,
1257 value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
1258 pending_inbound_htlcs: Vec<InboundHTLCOutput>,
1259 pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
1260 holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
1262 /// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
1263 /// need to ensure we resend them in the order we originally generated them. Note that because
1264 /// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
1265 /// sufficient to simply set this to the opposite of any message we are generating as we
1266 /// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
1267 /// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
1269 resend_order: RAACommitmentOrder,
1271 monitor_pending_channel_ready: bool,
1272 monitor_pending_revoke_and_ack: bool,
1273 monitor_pending_commitment_signed: bool,
1275 // TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
1276 // responsible for some of the HTLCs here or not - we don't know whether the update in question
1277 // completed or not. We currently ignore these fields entirely when force-closing a channel,
1278 // but need to handle this somehow or we run the risk of losing HTLCs!
1279 monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
1280 monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
1281 monitor_pending_finalized_fulfills: Vec<HTLCSource>,
1283 /// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
1284 /// but our signer (initially) refused to give us a signature, we should retry at some point in
1285 /// the future when the signer indicates it may have a signature for us.
1287 /// This flag is set in such a case. Note that we don't need to persist this as we'll end up
1288 /// setting it again as a side-effect of [`Channel::channel_reestablish`].
1289 signer_pending_commitment_update: bool,
1290 /// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
1291 /// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
1292 /// outbound or inbound.
1293 signer_pending_funding: bool,
1295 // pending_update_fee is filled when sending and receiving update_fee.
1297 // Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
1298 // or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
1299 // generating new commitment transactions with exactly the same criteria as inbound/outbound
1300 // HTLCs with similar state.
1301 pending_update_fee: Option<(u32, FeeUpdateState)>,
1302 // If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
1303 // it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
1304 // `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
1305 // `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
1306 // further `send_update_fee` calls, dropping the previous holding cell update entirely.
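// As a concrete (made-up) sequence: with AwaitingRemoteRevoke set, `send_update_fee(1_000)`
// followed by `send_update_fee(1_250)` leaves this as `Some(1_250)`; the 1_000 sat/kW update is
// dropped entirely, and 1_250 is promoted into `pending_update_fee` once the counterparty's
// `revoke_and_ack` frees the holding cell.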
1307 holding_cell_update_fee: Option<u32>,
1308 next_holder_htlc_id: u64,
1309 next_counterparty_htlc_id: u64,
1310 feerate_per_kw: u32,
1312 /// The timestamp set on our latest `channel_update` message for this channel. It is updated
1313 /// when the channel is updated in ways which may impact the `channel_update` message or when a
1314 /// new block is received, ensuring it's always at least moderately close to the current real time.
1316 update_time_counter: u32,
1318 #[cfg(debug_assertions)]
1319 /// Max to_local and to_remote outputs in a locally-generated commitment transaction
1320 holder_max_commitment_tx_output: Mutex<(u64, u64)>,
1321 #[cfg(debug_assertions)]
1322 /// Max to_local and to_remote outputs in a remote-generated commitment transaction
1323 counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,
1325 last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
1326 target_closing_feerate_sats_per_kw: Option<u32>,
1328 /// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
1329 /// update, we need to delay processing it until later. We do that here by simply storing the
1330 /// closing_signed message and handling it in `maybe_propose_closing_signed`.
1331 pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,
1333 /// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
1334 /// transaction. These are set once we reach `closing_negotiation_ready`.
#[cfg(test)]
1336 pub(crate) closing_fee_limits: Option<(u64, u64)>,
#[cfg(not(test))]
1338 closing_fee_limits: Option<(u64, u64)>,
1340 /// If we remove an HTLC (or fee update), commit, and receive our counterparty's
1341 /// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
1342 /// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
1343 /// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
1344 /// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
1346 /// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
1347 /// until we see a `commitment_signed` before doing so.
1349 /// We don't bother to persist this - we anticipate this state won't last longer than a few
1350 /// milliseconds, so any accidental force-closes here should be exceedingly rare.
1351 expecting_peer_commitment_signed: bool,
1353 /// The hash of the block in which the funding transaction was included.
1354 funding_tx_confirmed_in: Option<BlockHash>,
1355 funding_tx_confirmation_height: u32,
1356 short_channel_id: Option<u64>,
1357 /// Either the height at which this channel was created or the height at which it was last
1358 /// serialized if it was serialized by versions prior to 0.0.103.
1359 /// We use this to close if funding is never broadcasted.
1360 channel_creation_height: u32,
1362 counterparty_dust_limit_satoshis: u64,
#[cfg(test)]
1365 pub(super) holder_dust_limit_satoshis: u64,
#[cfg(not(test))]
1367 holder_dust_limit_satoshis: u64,
#[cfg(test)]
1370 pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
#[cfg(not(test))]
1372 counterparty_max_htlc_value_in_flight_msat: u64,
#[cfg(test)]
1375 pub(super) holder_max_htlc_value_in_flight_msat: u64,
#[cfg(not(test))]
1377 holder_max_htlc_value_in_flight_msat: u64,
1379 /// The minimum channel reserve we are required to maintain, as set by our counterparty.
1380 counterparty_selected_channel_reserve_satoshis: Option<u64>,
#[cfg(test)]
1383 pub(super) holder_selected_channel_reserve_satoshis: u64,
#[cfg(not(test))]
1385 holder_selected_channel_reserve_satoshis: u64,
1387 counterparty_htlc_minimum_msat: u64,
1388 holder_htlc_minimum_msat: u64,
#[cfg(test)]
1390 pub counterparty_max_accepted_htlcs: u16,
#[cfg(not(test))]
1392 counterparty_max_accepted_htlcs: u16,
1393 holder_max_accepted_htlcs: u16,
1394 minimum_depth: Option<u32>,
1396 counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
1398 pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
1399 funding_transaction: Option<Transaction>,
1400 is_batch_funding: Option<()>,
1402 counterparty_cur_commitment_point: Option<PublicKey>,
1403 counterparty_prev_commitment_point: Option<PublicKey>,
1404 counterparty_node_id: PublicKey,
1406 counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,
1408 commitment_secrets: CounterpartyCommitmentSecrets,
1410 channel_update_status: ChannelUpdateStatus,
1411 /// Once we reach `closing_negotiation_ready`, we set this, indicating that if closing_signed does
1412 /// not complete within a single timer tick (one minute), we should force-close the channel.
1413 /// This prevents us from keeping unusable channels around forever if our counterparty wishes
/// to stall the closing_signed negotiation indefinitely.
1415 /// Note that this field is reset to false on deserialization to give us a chance to connect to
1416 /// our peer and start the closing_signed negotiation fresh.
1417 closing_signed_in_flight: bool,
1419 /// Our counterparty's channel_announcement signatures provided in announcement_signatures.
1420 /// This can be used to rebroadcast the channel_announcement message later.
1421 announcement_sigs: Option<(Signature, Signature)>,
1423 // We save these values so we can make sure `next_local_commit_tx_fee_msat` and
1424 // `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
1425 // be, by comparing the cached values to the fee of the transaction generated by
1426 // `build_commitment_transaction`.
1427 #[cfg(any(test, fuzzing))]
1428 next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
1429 #[cfg(any(test, fuzzing))]
1430 next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
1432 /// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
1433 /// they will not send a channel_reestablish until the channel locks in. Then, they will send a
1434 /// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
1435 /// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
1436 /// message until we receive a channel_reestablish.
1438 /// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
1439 pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,
1441 /// An option set when we wish to track how many ticks have elapsed while waiting for a response
1442 /// from our counterparty after sending a message. If the peer has yet to respond after reaching
1443 /// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
1444 /// unblock the state machine.
1446 /// This behavior is mostly motivated by an lnd bug in which we don't receive a message we expect
1447 /// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
1448 /// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
1450 /// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
1451 /// [`msgs::RevokeAndACK`] message from the counterparty.
1452 sent_message_awaiting_response: Option<usize>,
1454 #[cfg(any(test, fuzzing))]
1455 // When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
1456 // corresponding HTLC on the inbound path. If, then, the outbound path channel is
1457 // disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
1458 // messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
1459 // is fine, but as a sanity check when we fail to generate the second claim, we check here
1460 // that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
1461 historical_inbound_htlc_fulfills: HashSet<u64>,
1463 /// This channel's type, as negotiated during channel open
1464 channel_type: ChannelTypeFeatures,
1466 // Our counterparty can offer us SCID aliases which they will map to this channel when routing
1467 // outbound payments. These can be used in invoice route hints to avoid explicitly revealing
1468 // the channel's funding UTXO.
1470 // We also use this when sending our peer a channel_update that isn't to be broadcasted
1471 // publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
1472 // associated channel mapping.
1474 // We only bother storing the most recent SCID alias at any time, though our counterparty has
1475 // to store all of them.
1476 latest_inbound_scid_alias: Option<u64>,
1478 // We always offer our counterparty a static SCID alias, which we recognize as for this channel
1479 // if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
1480 // don't currently support node id aliases and eventually privacy should be provided with
1481 // blinded paths instead of simple scid+node_id aliases.
1482 outbound_scid_alias: u64,
1484 // We track whether we already emitted a `ChannelPending` event.
1485 channel_pending_event_emitted: bool,
1487 // We track whether we already emitted a `ChannelReady` event.
1488 channel_ready_event_emitted: bool,
1490 /// Set to `Some` if we initiated the shutdown of this channel.
1491 local_initiated_shutdown: Option<()>,
1493 /// The unique identifier used to re-derive the private key material for the channel through
1494 /// [`SignerProvider::derive_channel_signer`].
#[cfg(not(test))]
1496 channel_keys_id: [u8; 32],
#[cfg(test)]
1498 pub channel_keys_id: [u8; 32],
1500 /// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
1501 /// store it here and only release it to the `ChannelManager` once it asks for it.
1502 blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
1505 impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
1506 fn new_for_inbound_channel<'a, ES: Deref, F: Deref, L: Deref>(
1507 fee_estimator: &'a LowerBoundedFeeEstimator<F>,
1508 entropy_source: &'a ES,
1509 signer_provider: &'a SP,
1510 counterparty_node_id: PublicKey,
1511 their_features: &'a InitFeatures,
1513 config: &'a UserConfig,
1514 current_chain_height: u32,
1517 our_funding_satoshis: u64,
1518 counterparty_pubkeys: ChannelPublicKeys,
1519 channel_type: ChannelTypeFeatures,
1520 holder_selected_channel_reserve_satoshis: u64,
1521 msg_channel_reserve_satoshis: u64,
1523 open_channel_fields: msgs::CommonOpenChannelFields,
1524 ) -> Result<ChannelContext<SP>, ChannelError>
1526 ES::Target: EntropySource,
1527 F::Target: FeeEstimator,
1529 SP::Target: SignerProvider,
1531 let logger = WithContext::from(logger, Some(counterparty_node_id), Some(open_channel_fields.temporary_channel_id));
1532 let announced_channel = (open_channel_fields.channel_flags & 1) == 1;
1534 let channel_value_satoshis = our_funding_satoshis.saturating_add(open_channel_fields.funding_satoshis);
1536 let channel_keys_id = signer_provider.generate_channel_keys_id(true, channel_value_satoshis, user_id);
1537 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
1538 let pubkeys = holder_signer.pubkeys().clone();
1540 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
1541 return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
1544 // Check sanity of message fields:
1545 if channel_value_satoshis > config.channel_handshake_limits.max_funding_satoshis {
1546 return Err(ChannelError::Close(format!(
1547 "Per our config, funding must be at most {}. It was {}. Peer contribution: {}. Our contribution: {}",
1548 config.channel_handshake_limits.max_funding_satoshis, channel_value_satoshis,
1549 open_channel_fields.funding_satoshis, our_funding_satoshis)));
1551 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
1552 return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", channel_value_satoshis)));
1554 if msg_channel_reserve_satoshis > channel_value_satoshis {
1555 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be no greater than channel_value_satoshis: {}", msg_channel_reserve_satoshis, channel_value_satoshis)));
1557 let full_channel_value_msat = (channel_value_satoshis - msg_channel_reserve_satoshis) * 1000;
1558 if msg_push_msat > full_channel_value_msat {
1559 return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg_push_msat, full_channel_value_msat)));
1561 if open_channel_fields.dust_limit_satoshis > channel_value_satoshis {
1562 return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than channel_value_satoshis {}. Peer never wants payout outputs?", open_channel_fields.dust_limit_satoshis, channel_value_satoshis)));
1564 if open_channel_fields.htlc_minimum_msat >= full_channel_value_msat {
1565 return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", open_channel_fields.htlc_minimum_msat, full_channel_value_msat)));
1567 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, open_channel_fields.commitment_feerate_sat_per_1000_weight, None, &&logger)?;
1569 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
1570 if open_channel_fields.to_self_delay > max_counterparty_selected_contest_delay {
1571 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, open_channel_fields.to_self_delay)));
1573 if open_channel_fields.max_accepted_htlcs < 1 {
1574 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
1576 if open_channel_fields.max_accepted_htlcs > MAX_HTLCS {
1577 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", open_channel_fields.max_accepted_htlcs, MAX_HTLCS)));
1580 // Now check against optional parameters as set by config...
1581 if channel_value_satoshis < config.channel_handshake_limits.min_funding_satoshis {
1582 return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", channel_value_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
1584 if open_channel_fields.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
1585 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", open_channel_fields.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
1587 if open_channel_fields.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
1588 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", open_channel_fields.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
1590 if msg_channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
1591 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg_channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
1593 if open_channel_fields.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
1594 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", open_channel_fields.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
1596 if open_channel_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
1597 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
1599 if open_channel_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
1600 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
1603 // Convert things into internal flags and prep our state:
1605 if config.channel_handshake_limits.force_announced_channel_preference {
1606 if config.channel_handshake_config.announced_channel != announced_channel {
1607 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
1611 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
1612 // Protocol level safety check in place, although it should never happen because
1613 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
1614 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
1616 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
1617 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg_push_msat)));
1619 if msg_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
1620 log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
1621 msg_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
1623 if holder_selected_channel_reserve_satoshis < open_channel_fields.dust_limit_satoshis {
1624 return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", open_channel_fields.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
1627 // check if the funder's amount for the initial commitment tx is sufficient
1628 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
1629 let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
1630 ANCHOR_OUTPUT_VALUE_SATOSHI * 2
1634 let funders_amount_msat = open_channel_fields.funding_satoshis * 1000 - msg_push_msat;
1635 let commitment_tx_fee = commit_tx_fee_msat(open_channel_fields.commitment_feerate_sat_per_1000_weight, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
1636 if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
1637 return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
1640 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
1641 // While it's reasonable for us to not meet the channel reserve initially (if they don't
1642 // want to push much to us), our counterparty should always have more than our reserve.
1643 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
1644 return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
1647 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
1648 match &open_channel_fields.shutdown_scriptpubkey {
1649 &Some(ref script) => {
1650 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
1651 if script.len() == 0 {
1654 if !script::is_bolt2_compliant(&script, their_features) {
1655 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
1657 Some(script.clone())
1660 // Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (a 0-length script). The peer looks buggy, so we fail the channel
1662 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
1667 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
1668 match signer_provider.get_shutdown_scriptpubkey() {
1669 Ok(scriptpubkey) => Some(scriptpubkey),
1670 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
1674 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
1675 if !shutdown_scriptpubkey.is_compatible(&their_features) {
1676 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
1680 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
1681 Ok(script) => script,
1682 Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
1685 let mut secp_ctx = Secp256k1::new();
1686 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
1688 let minimum_depth = if is_0conf {
Some(0)
} else {
1691 Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
1694 let value_to_self_msat = our_funding_satoshis * 1000 + msg_push_msat;
1696 // TODO(dual_funding): Checks for `funding_feerate_sat_per_1000_weight`?
1698 let channel_context = ChannelContext {
1701 config: LegacyChannelConfig {
1702 options: config.channel_config.clone(),
1704 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
1709 inbound_handshake_limits_override: None,
1711 temporary_channel_id: Some(open_channel_fields.temporary_channel_id),
1712 channel_id: open_channel_fields.temporary_channel_id,
1713 channel_state: ChannelState::NegotiatingFunding(
1714 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
1716 announcement_sigs_state: AnnouncementSigsState::NotSent,
1719 latest_monitor_update_id: 0,
1721 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
1722 shutdown_scriptpubkey,
1725 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
1726 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
1729 pending_inbound_htlcs: Vec::new(),
1730 pending_outbound_htlcs: Vec::new(),
1731 holding_cell_htlc_updates: Vec::new(),
1732 pending_update_fee: None,
1733 holding_cell_update_fee: None,
1734 next_holder_htlc_id: 0,
1735 next_counterparty_htlc_id: 0,
1736 update_time_counter: 1,
1738 resend_order: RAACommitmentOrder::CommitmentFirst,
1740 monitor_pending_channel_ready: false,
1741 monitor_pending_revoke_and_ack: false,
1742 monitor_pending_commitment_signed: false,
1743 monitor_pending_forwards: Vec::new(),
1744 monitor_pending_failures: Vec::new(),
1745 monitor_pending_finalized_fulfills: Vec::new(),
1747 signer_pending_commitment_update: false,
1748 signer_pending_funding: false,
1751 #[cfg(debug_assertions)]
1752 holder_max_commitment_tx_output: Mutex::new((value_to_self_msat, (channel_value_satoshis * 1000 - msg_push_msat).saturating_sub(value_to_self_msat))),
1753 #[cfg(debug_assertions)]
1754 counterparty_max_commitment_tx_output: Mutex::new((value_to_self_msat, (channel_value_satoshis * 1000 - msg_push_msat).saturating_sub(value_to_self_msat))),
1756 last_sent_closing_fee: None,
1757 pending_counterparty_closing_signed: None,
1758 expecting_peer_commitment_signed: false,
1759 closing_fee_limits: None,
1760 target_closing_feerate_sats_per_kw: None,
1762 funding_tx_confirmed_in: None,
1763 funding_tx_confirmation_height: 0,
1764 short_channel_id: None,
1765 channel_creation_height: current_chain_height,
1767 feerate_per_kw: open_channel_fields.commitment_feerate_sat_per_1000_weight,
1768 channel_value_satoshis,
1769 counterparty_dust_limit_satoshis: open_channel_fields.dust_limit_satoshis,
1770 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
1771 counterparty_max_htlc_value_in_flight_msat: cmp::min(open_channel_fields.max_htlc_value_in_flight_msat, channel_value_satoshis * 1000),
1772 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
1773 counterparty_selected_channel_reserve_satoshis: Some(msg_channel_reserve_satoshis),
1774 holder_selected_channel_reserve_satoshis,
1775 counterparty_htlc_minimum_msat: open_channel_fields.htlc_minimum_msat,
1776 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
1777 counterparty_max_accepted_htlcs: open_channel_fields.max_accepted_htlcs,
1778 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
1781 counterparty_forwarding_info: None,
1783 channel_transaction_parameters: ChannelTransactionParameters {
1784 holder_pubkeys: pubkeys,
1785 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
1786 is_outbound_from_holder: false,
1787 counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
1788 selected_contest_delay: open_channel_fields.to_self_delay,
1789 pubkeys: counterparty_pubkeys,
1791 funding_outpoint: None,
1792 channel_type_features: channel_type.clone()
1794 funding_transaction: None,
1795 is_batch_funding: None,
1797 counterparty_cur_commitment_point: Some(open_channel_fields.first_per_commitment_point),
1798 counterparty_prev_commitment_point: None,
1799 counterparty_node_id,
1801 counterparty_shutdown_scriptpubkey,
1803 commitment_secrets: CounterpartyCommitmentSecrets::new(),
1805 channel_update_status: ChannelUpdateStatus::Enabled,
1806 closing_signed_in_flight: false,
1808 announcement_sigs: None,
1810 #[cfg(any(test, fuzzing))]
1811 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
1812 #[cfg(any(test, fuzzing))]
1813 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
1815 workaround_lnd_bug_4006: None,
1816 sent_message_awaiting_response: None,
1818 latest_inbound_scid_alias: None,
1819 outbound_scid_alias: 0,
1821 channel_pending_event_emitted: false,
1822 channel_ready_event_emitted: false,
1824 #[cfg(any(test, fuzzing))]
1825 historical_inbound_htlc_fulfills: new_hash_set(),
1830 local_initiated_shutdown: None,
1832 blocked_monitor_updates: Vec::new(),
1838 fn new_for_outbound_channel<'a, ES: Deref, F: Deref>(
1839 fee_estimator: &'a LowerBoundedFeeEstimator<F>,
1840 entropy_source: &'a ES,
1841 signer_provider: &'a SP,
1842 counterparty_node_id: PublicKey,
1843 their_features: &'a InitFeatures,
1844 funding_satoshis: u64,
1847 config: &'a UserConfig,
1848 current_chain_height: u32,
1849 outbound_scid_alias: u64,
1850 temporary_channel_id: Option<ChannelId>,
1851 holder_selected_channel_reserve_satoshis: u64,
1852 channel_keys_id: [u8; 32],
1853 holder_signer: <SP::Target as SignerProvider>::EcdsaSigner,
1854 pubkeys: ChannelPublicKeys,
1855 ) -> Result<ChannelContext<SP>, APIError>
1857 ES::Target: EntropySource,
1858 F::Target: FeeEstimator,
1859 SP::Target: SignerProvider,
1861 // This will be updated with the counterparty contribution if this is a dual-funded channel
1862 let channel_value_satoshis = funding_satoshis;
1864 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
1866 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
1867 return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
1869 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
1870 return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
1872 let channel_value_msat = channel_value_satoshis * 1000;
1873 if push_msat > channel_value_msat {
1874 return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
1876 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
1877 return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
1880 let channel_type = get_initial_channel_type(&config, their_features);
1881 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
1883 let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
1884 (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
1886 (ConfirmationTarget::NonAnchorChannelFee, 0)
1888 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
1890 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
1891 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
1892 if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
1893 return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
1896 let mut secp_ctx = Secp256k1::new();
1897 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
1899 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
1900 match signer_provider.get_shutdown_scriptpubkey() {
1901 Ok(scriptpubkey) => Some(scriptpubkey),
1902 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
1906 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
1907 if !shutdown_scriptpubkey.is_compatible(&their_features) {
1908 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
1912 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
1913 Ok(script) => script,
1914 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
1917 let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
1922 config: LegacyChannelConfig {
1923 options: config.channel_config.clone(),
1924 announced_channel: config.channel_handshake_config.announced_channel,
1925 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
1930 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
1932 channel_id: temporary_channel_id,
1933 temporary_channel_id: Some(temporary_channel_id),
1934 channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
1935 announcement_sigs_state: AnnouncementSigsState::NotSent,
1937 // We'll add our counterparty's `funding_satoshis` when we receive `accept_channel2`.
1938 channel_value_satoshis,
1940 latest_monitor_update_id: 0,
1942 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
1943 shutdown_scriptpubkey,
1946 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
1947 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
1950 pending_inbound_htlcs: Vec::new(),
1951 pending_outbound_htlcs: Vec::new(),
1952 holding_cell_htlc_updates: Vec::new(),
1953 pending_update_fee: None,
1954 holding_cell_update_fee: None,
1955 next_holder_htlc_id: 0,
1956 next_counterparty_htlc_id: 0,
1957 update_time_counter: 1,
1959 resend_order: RAACommitmentOrder::CommitmentFirst,
1961 monitor_pending_channel_ready: false,
1962 monitor_pending_revoke_and_ack: false,
1963 monitor_pending_commitment_signed: false,
1964 monitor_pending_forwards: Vec::new(),
1965 monitor_pending_failures: Vec::new(),
1966 monitor_pending_finalized_fulfills: Vec::new(),
1968 signer_pending_commitment_update: false,
1969 signer_pending_funding: false,
1971 // We'll add our counterparty's `funding_satoshis` to these max commitment output assertions
1972 // when we receive `accept_channel2`.
1973 #[cfg(debug_assertions)]
1974 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
1975 #[cfg(debug_assertions)]
1976 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
1978 last_sent_closing_fee: None,
1979 pending_counterparty_closing_signed: None,
1980 expecting_peer_commitment_signed: false,
1981 closing_fee_limits: None,
1982 target_closing_feerate_sats_per_kw: None,
1984 funding_tx_confirmed_in: None,
1985 funding_tx_confirmation_height: 0,
1986 short_channel_id: None,
1987 channel_creation_height: current_chain_height,
1989 feerate_per_kw: commitment_feerate,
1990 counterparty_dust_limit_satoshis: 0,
1991 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
1992 counterparty_max_htlc_value_in_flight_msat: 0,
1993 // We'll adjust this to include our counterparty's `funding_satoshis` when we
1994 // receive `accept_channel2`.
1995 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
1996 counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
1997 holder_selected_channel_reserve_satoshis,
1998 counterparty_htlc_minimum_msat: 0,
1999 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
2000 counterparty_max_accepted_htlcs: 0,
2001 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
2002 minimum_depth: None, // Filled in in accept_channel
2004 counterparty_forwarding_info: None,
2006 channel_transaction_parameters: ChannelTransactionParameters {
2007 holder_pubkeys: pubkeys,
2008 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
2009 is_outbound_from_holder: true,
2010 counterparty_parameters: None,
2011 funding_outpoint: None,
2012 channel_type_features: channel_type.clone()
2014 funding_transaction: None,
2015 is_batch_funding: None,
2017 counterparty_cur_commitment_point: None,
2018 counterparty_prev_commitment_point: None,
2019 counterparty_node_id,
2021 counterparty_shutdown_scriptpubkey: None,
2023 commitment_secrets: CounterpartyCommitmentSecrets::new(),
2025 channel_update_status: ChannelUpdateStatus::Enabled,
2026 closing_signed_in_flight: false,
2028 announcement_sigs: None,
2030 #[cfg(any(test, fuzzing))]
2031 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
2032 #[cfg(any(test, fuzzing))]
2033 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
2035 workaround_lnd_bug_4006: None,
2036 sent_message_awaiting_response: None,
2038 latest_inbound_scid_alias: None,
2039 outbound_scid_alias,
2041 channel_pending_event_emitted: false,
2042 channel_ready_event_emitted: false,
2044 #[cfg(any(test, fuzzing))]
2045 historical_inbound_htlc_fulfills: new_hash_set(),
2050 blocked_monitor_updates: Vec::new(),
2051 local_initiated_shutdown: None,
2055 /// Allowed in any state (including after shutdown)
2056 pub fn get_update_time_counter(&self) -> u32 {
2057 self.update_time_counter
2060 pub fn get_latest_monitor_update_id(&self) -> u64 {
2061 self.latest_monitor_update_id
2064 pub fn should_announce(&self) -> bool {
2065 self.config.announced_channel
2068 pub fn is_outbound(&self) -> bool {
2069 self.channel_transaction_parameters.is_outbound_from_holder
2072 /// Gets the fee we'd want to charge for adding an HTLC output to this Channel
2073 /// Allowed in any state (including after shutdown)
2074 pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
2075 self.config.options.forwarding_fee_base_msat
2078 /// Returns true if we've ever received a message from the remote end for this Channel
2079 pub fn have_received_message(&self) -> bool {
2080 self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
2083 /// Returns true if this channel is fully established and not known to be closing.
2084 /// Allowed in any state (including after shutdown)
2085 pub fn is_usable(&self) -> bool {
2086 matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
2087 !self.channel_state.is_local_shutdown_sent() &&
2088 !self.channel_state.is_remote_shutdown_sent() &&
2089 !self.monitor_pending_channel_ready
2092 /// Returns the state of the channel in its various stages of shutdown.
2093 pub fn shutdown_state(&self) -> ChannelShutdownState {
2094 match self.channel_state {
2095 ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
2096 if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
2097 ChannelShutdownState::ShutdownInitiated
2098 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
2099 ChannelShutdownState::ResolvingHTLCs
2100 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
2101 ChannelShutdownState::NegotiatingClosingFee
2103 ChannelShutdownState::NotShuttingDown
2105 ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
2106 _ => ChannelShutdownState::NotShuttingDown,
2110 fn closing_negotiation_ready(&self) -> bool {
2111 let is_ready_to_close = match self.channel_state {
2112 ChannelState::AwaitingChannelReady(flags) =>
2113 flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
2114 ChannelState::ChannelReady(flags) =>
2115 flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
2118 self.pending_inbound_htlcs.is_empty() &&
2119 self.pending_outbound_htlcs.is_empty() &&
2120 self.pending_update_fee.is_none() &&
2124 /// Returns true if this channel is currently available for use. This is a superset of
2125 /// is_usable() and considers things like the channel being temporarily disabled.
2126 /// Allowed in any state (including after shutdown)
2127 pub fn is_live(&self) -> bool {
2128 self.is_usable() && !self.channel_state.is_peer_disconnected()
2131 // Public utilities:
2133 pub fn channel_id(&self) -> ChannelId {
2137 /// Returns the `temporary_channel_id` used during channel establishment.
2139 /// Will return `None` for channels created prior to LDK version 0.0.115.
2140 pub fn temporary_channel_id(&self) -> Option<ChannelId> {
2141 self.temporary_channel_id
2144 pub fn minimum_depth(&self) -> Option<u32> {
2148 /// Gets the "user_id" value passed into the construction of this channel. It has no special
2149 /// meaning and exists only to allow users to have a persistent identifier of a channel.
2150 pub fn get_user_id(&self) -> u128 {
2154 /// Gets the channel's type
2155 pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
2159 /// Gets the channel's `short_channel_id`.
2161 /// Will return `None` if the channel hasn't been confirmed yet.
2162 pub fn get_short_channel_id(&self) -> Option<u64> {
2163 self.short_channel_id
2166 /// Allowed in any state (including after shutdown)
2167 pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
2168 self.latest_inbound_scid_alias
2171 /// Allowed in any state (including after shutdown)
2172 pub fn outbound_scid_alias(&self) -> u64 {
2173 self.outbound_scid_alias
2176 /// Returns the holder signer for this channel.
2178 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
2179 return &self.holder_signer
2182 /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
2183 /// indicating we were written by LDK prior to 0.0.106, which did not set outbound SCID aliases,
2184 /// or prior to any channel actions during `Channel` initialization.
2185 pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
2186 debug_assert_eq!(self.outbound_scid_alias, 0);
2187 self.outbound_scid_alias = outbound_scid_alias;
2190 /// Returns the funding_txo we either got from our peer, or were given by
2191 /// get_funding_created.
2192 pub fn get_funding_txo(&self) -> Option<OutPoint> {
2193 self.channel_transaction_parameters.funding_outpoint
2196 /// Returns the height at which our funding transaction was confirmed.
2197 pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
2198 let conf_height = self.funding_tx_confirmation_height;
2199 if conf_height > 0 {
2206 /// Returns the block hash in which our funding transaction was confirmed.
2207 pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
2208 self.funding_tx_confirmed_in
2211 /// Returns the current number of confirmations on the funding transaction.
2212 pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
2213 if self.funding_tx_confirmation_height == 0 {
2214 // We either haven't seen any confirmation yet, or observed a reorg.
2218 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
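// Worked example: if the funding transaction confirmed at height 100 and the current chain tip
// is also 100, this returns 1 confirmation; at a tip of 105 it returns 6. An unconfirmed (or
// reorged-out) funding transaction, i.e. `funding_tx_confirmation_height == 0`, yields 0.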
2221 fn get_holder_selected_contest_delay(&self) -> u16 {
2222 self.channel_transaction_parameters.holder_selected_contest_delay
2225 fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
2226 &self.channel_transaction_parameters.holder_pubkeys
2229 pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
2230 self.channel_transaction_parameters.counterparty_parameters
2231 .as_ref().map(|params| params.selected_contest_delay)
2234 fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
2235 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
2238 /// Allowed in any state (including after shutdown)
2239 pub fn get_counterparty_node_id(&self) -> PublicKey {
2240 self.counterparty_node_id
2243 /// Allowed in any state (including after shutdown)
2244 pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
2245 self.holder_htlc_minimum_msat
2248 /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
2249 pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
2250 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
2253 /// Allowed in any state (including after shutdown)
2254 pub fn get_announced_htlc_max_msat(&self) -> u64 {
2256 // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
2257 // to use full capacity. This is an effort to reduce routing failures, because in many cases
2258 // a channel might have been used to route very small values (either by honest users or as DoS).
2259 self.channel_value_satoshis * 1000 * 9 / 10,
2261 self.counterparty_max_htlc_value_in_flight_msat
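// Worked example (made-up figures): for a 1_000_000 sat channel the capacity-based cap is
// 1_000_000 * 1000 * 9 / 10 = 900_000_000 msat; if the counterparty advertised a
// max_htlc_value_in_flight of 500_000_000 msat, the smaller of the two values
// (500_000_000 msat) is what we announce.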
2265 /// Allowed in any state (including after shutdown)
2266 pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
2267 self.counterparty_htlc_minimum_msat
2270 /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
2271 pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
2272 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
2275 fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
2276 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
2277 let holder_reserve = self.holder_selected_channel_reserve_satoshis;
2279 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
2280 party_max_htlc_value_in_flight_msat
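// Illustrative arithmetic: with a 1_000_000 sat channel and 10_000 sat reserves on each side,
// the reserve-adjusted capacity is (1_000_000 - 10_000 - 10_000) * 1000 = 980_000_000 msat,
// which is then further capped by the given party's max_htlc_value_in_flight_msat.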
2285 pub fn get_value_satoshis(&self) -> u64 {
2286 self.channel_value_satoshis
2289 pub fn get_fee_proportional_millionths(&self) -> u32 {
2290 self.config.options.forwarding_fee_proportional_millionths
2293 pub fn get_cltv_expiry_delta(&self) -> u16 {
2294 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
2297 pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
2298 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
2299 where F::Target: FeeEstimator
2301 match self.config.options.max_dust_htlc_exposure {
2302 MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
2303 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
2304 ConfirmationTarget::OnChainSweep) as u64;
2305 feerate_per_kw.saturating_mul(multiplier)
2307 MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
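// For example: `MaxDustHTLCExposure::FeeRateMultiplier(5_000)` with an estimated OnChainSweep
// feerate of 2_500 sat/kW yields a limit of 2_500 * 5_000 = 12_500_000 msat, while
// `MaxDustHTLCExposure::FixedLimitMsat(5_000_000)` pins the limit at 5_000_000 msat regardless
// of current feerates.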
2311 /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
2312 pub fn prev_config(&self) -> Option<ChannelConfig> {
2313 self.prev_config.map(|prev_config| prev_config.0)
2316 // Checks whether we should emit a `ChannelPending` event.
2317 pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
2318 self.is_funding_broadcast() && !self.channel_pending_event_emitted
2321 // Returns whether we already emitted a `ChannelPending` event.
2322 pub(crate) fn channel_pending_event_emitted(&self) -> bool {
2323 self.channel_pending_event_emitted
2326 // Remembers that we already emitted a `ChannelPending` event.
2327 pub(crate) fn set_channel_pending_event_emitted(&mut self) {
2328 self.channel_pending_event_emitted = true;
2331 // Checks whether we should emit a `ChannelReady` event.
2332 pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
2333 self.is_usable() && !self.channel_ready_event_emitted
2336 // Remembers that we already emitted a `ChannelReady` event.
2337 pub(crate) fn set_channel_ready_event_emitted(&mut self) {
2338 self.channel_ready_event_emitted = true;
2341 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
2342 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
2343 /// no longer be considered when forwarding HTLCs.
2344 pub fn maybe_expire_prev_config(&mut self) {
2345 if self.prev_config.is_none() {
2348 let prev_config = self.prev_config.as_mut().unwrap();
2350 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
2351 self.prev_config = None;
2355 /// Returns the current [`ChannelConfig`] applied to the channel.
2356 pub fn config(&self) -> ChannelConfig {
2360 /// Updates the channel's config. A bool is returned indicating whether the applied config
2361 /// update resulted in a new ChannelUpdate message.
2362 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
2363 let did_channel_update =
2364 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
2365 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
2366 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
2367 if did_channel_update {
2368 self.prev_config = Some((self.config.options, 0));
2369 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
2370 // policy change to propagate throughout the network.
2371 self.update_time_counter += 1;
2373 self.config.options = *config;
2377 /// Returns true if funding_signed was sent/received and the
2378 /// funding transaction has been broadcast if necessary.
2379 pub fn is_funding_broadcast(&self) -> bool {
2380 !self.channel_state.is_pre_funded_state() &&
2381 !matches!(self.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH))
2384 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
2385 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
2386 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
2387 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
2388 /// such, a transaction is generally the result of b increasing the amount paid to a (or adding
/// an HTLC to it).
2390 /// @local is used only to convert relevant internal structures which refer to remote vs local
2391 /// to decide value of outputs and direction of HTLCs.
2392 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
2393 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
2394 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
2395 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
2396 /// which peer generated this transaction and "to whom" this transaction flows.
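/// As an illustrative reading of the inclusion rules below (not additional behavior): an
/// outbound HTLC still in `LocalAnnounced` is only included when `generated_by_local` is true,
/// while an inbound HTLC in `RemoteAnnounced` is only included when `generated_by_local` is
/// false; `local` then only selects whose commitment transaction (and thus which side's output
/// values and HTLC directions) we are building.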
2398 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
2399 where L::Target: Logger
2401 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
2402 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
2403 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
2405 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
2406 let mut remote_htlc_total_msat = 0;
2407 let mut local_htlc_total_msat = 0;
2408 let mut value_to_self_msat_offset = 0;
2410 let mut feerate_per_kw = self.feerate_per_kw;
2411 if let Some((feerate, update_state)) = self.pending_update_fee {
2412 if match update_state {
2413 // Note that these match the inclusion criteria when scanning
2414 // pending_inbound_htlcs below.
2415 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
2416 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
2417 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
2419 feerate_per_kw = feerate;
2423 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
2424 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
2425 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
2427 if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
2429 macro_rules! get_htlc_in_commitment {
2430 ($htlc: expr, $offered: expr) => {
2431 HTLCOutputInCommitment {
2433 amount_msat: $htlc.amount_msat,
2434 cltv_expiry: $htlc.cltv_expiry,
2435 payment_hash: $htlc.payment_hash,
2436 transaction_output_index: None
2441 macro_rules! add_htlc_output {
2442 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
2443 if $outbound == local { // "offered HTLC output"
2444 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
2445 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2448 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
2450 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
2451 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
2452 included_non_dust_htlcs.push((htlc_in_tx, $source));
2454 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
2455 included_dust_htlcs.push((htlc_in_tx, $source));
2458 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
2459 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2462 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
2464 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
2465 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
2466 included_non_dust_htlcs.push((htlc_in_tx, $source));
2468 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
2469 included_dust_htlcs.push((htlc_in_tx, $source));
2475 let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
2477 for ref htlc in self.pending_inbound_htlcs.iter() {
2478 let (include, state_name) = match htlc.state {
2479 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
2480 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
2481 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
2482 InboundHTLCState::Committed => (true, "Committed"),
2483 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
2487 add_htlc_output!(htlc, false, None, state_name);
2488 remote_htlc_total_msat += htlc.amount_msat;
2490 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
2492 &InboundHTLCState::LocalRemoved(ref reason) => {
2493 if generated_by_local {
2494 if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
2495 inbound_htlc_preimages.push(preimage);
2496 value_to_self_msat_offset += htlc.amount_msat as i64;
2506 let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
2508 for ref htlc in self.pending_outbound_htlcs.iter() {
2509 let (include, state_name) = match htlc.state {
2510 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
2511 OutboundHTLCState::Committed => (true, "Committed"),
2512 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
2513 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
2514 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
2517 let preimage_opt = match htlc.state {
2518 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
2519 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
2520 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
2524 if let Some(preimage) = preimage_opt {
2525 outbound_htlc_preimages.push(preimage);
2529 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
2530 local_htlc_total_msat += htlc.amount_msat;
2532 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
2534 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
2535 value_to_self_msat_offset -= htlc.amount_msat as i64;
2537 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
2538 if !generated_by_local {
2539 value_to_self_msat_offset -= htlc.amount_msat as i64;
2547 let value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
2548 assert!(value_to_self_msat >= 0);
2549 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
2550 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
2551 // "violate" their reserve value by counting those against it. Thus, we have to convert
2552 // everything to i64 before subtracting as otherwise we can overflow.
2553 let value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
2554 assert!(value_to_remote_msat >= 0);
2556 #[cfg(debug_assertions)]
2558 // Make sure that the to_self/to_remote is always either past the appropriate
2559 // channel_reserve *or* it is making progress towards it.
2560 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
2561 self.holder_max_commitment_tx_output.lock().unwrap()
2563 self.counterparty_max_commitment_tx_output.lock().unwrap()
2565 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
2566 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
2567 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
2568 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
2571 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
2572 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
2573 let (value_to_self, value_to_remote) = if self.is_outbound() {
2574 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
2576 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
2579 let mut value_to_a = if local { value_to_self } else { value_to_remote };
2580 let mut value_to_b = if local { value_to_remote } else { value_to_self };
2581 let (funding_pubkey_a, funding_pubkey_b) = if local {
2582 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
2584 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
2587 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
2588 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
2593 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
2594 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
2599 let num_nondust_htlcs = included_non_dust_htlcs.len();
2601 let channel_parameters =
2602 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
2603 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
2604 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
2611 &mut included_non_dust_htlcs,
2614 let mut htlcs_included = included_non_dust_htlcs;
2615 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
2616 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
2617 htlcs_included.append(&mut included_dust_htlcs);
2625 local_balance_msat: value_to_self_msat as u64,
2626 remote_balance_msat: value_to_remote_msat as u64,
2627 inbound_htlc_preimages,
2628 outbound_htlc_preimages,
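	// Illustrative sketch (ours, not part of the original code): the non-dust test applied to each
	// HTLC above keeps it as a real commitment output only if its value covers the broadcaster's
	// dust limit plus the fee of its second-stage (HTLC-timeout or HTLC-success) transaction; with
	// anchors that second-stage fee is zero. The helper name and free-standing form are hypothetical.
	#[allow(dead_code)]
	fn htlc_is_non_dust_sketch(amount_msat: u64, broadcaster_dust_limit_satoshis: u64, htlc_tx_fee_sat: u64) -> bool {
		amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee_sat
	}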
2633 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
2634 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
2635 /// our counterparty!)
2636 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
2637 /// TODO Some magic rust shit to compile-time check this?
2638 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
2639 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
2640 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
2641 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
2642 let counterparty_pubkeys = self.get_counterparty_pubkeys();
2644 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
2648 /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
2649 /// will sign and send to our counterparty.
2650 /// Panics if the counterparty's current per-commitment point is not yet known.
2651 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
2652 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
2653 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
2654 let counterparty_pubkeys = self.get_counterparty_pubkeys();
2656 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
2659 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
2660 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
2661 /// Panics if called before accept_channel/InboundV1Channel::new
2662 pub fn get_funding_redeemscript(&self) -> ScriptBuf {
2663 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
2666 fn counterparty_funding_pubkey(&self) -> &PublicKey {
2667 &self.get_counterparty_pubkeys().funding_pubkey
2670 pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
2674 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
2675 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
2676 // may, at any point, increase by at least 10 sat/vB (i.e. 2530 sat/kWU) or 25%,
2677 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
2678 // more dust balance if the feerate increases when we have several HTLCs pending
2679 // which are near the dust limit.
2680 let mut feerate_per_kw = self.feerate_per_kw;
2681 // If there's a pending update fee, use it to ensure we aren't under-estimating
2682 // potential feerate updates coming soon.
2683 if let Some((feerate, _)) = self.pending_update_fee {
2684 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
2686 if let Some(feerate) = outbound_feerate_update {
2687 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
2689 let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
2690 cmp::max(2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
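	// Illustrative sketch (ours): the buffered feerate is simply the larger of a 2530 sat/kWU floor
	// and 125% of the highest feerate we may soon be paying (current, pending update, or proposed
	// outbound update). The helper below restates that calculation with plain, hypothetical parameters.
	#[allow(dead_code)]
	fn dust_buffer_feerate_sketch(current_feerate_per_kw: u32, pending_or_proposed_update: Option<u32>) -> u32 {
		let effective = cmp::max(current_feerate_per_kw, pending_or_proposed_update.unwrap_or(0));
		let plus_quarter = effective.checked_mul(1250).map(|v| v / 1000);
		cmp::max(2530, plus_quarter.unwrap_or(u32::max_value()))
	}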
2693 /// Get forwarding information for the counterparty.
2694 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
2695 self.counterparty_forwarding_info.clone()
2698 /// Returns an HTLCStats about pending inbound HTLCs
2699 fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
2701 let mut stats = HTLCStats {
2702 pending_htlcs: context.pending_inbound_htlcs.len() as u32,
2703 pending_htlcs_value_msat: 0,
2704 on_counterparty_tx_dust_exposure_msat: 0,
2705 on_holder_tx_dust_exposure_msat: 0,
2706 holding_cell_msat: 0,
2707 on_holder_tx_holding_cell_htlcs_count: 0,
2710 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2713 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
2714 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
2715 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
2717 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
2718 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
2719 for ref htlc in context.pending_inbound_htlcs.iter() {
2720 stats.pending_htlcs_value_msat += htlc.amount_msat;
2721 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
2722 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
2724 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
2725 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
2731 /// Returns an HTLCStats about pending outbound HTLCs, *including* pending adds in our holding cell.
2732 fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
2734 let mut stats = HTLCStats {
2735 pending_htlcs: context.pending_outbound_htlcs.len() as u32,
2736 pending_htlcs_value_msat: 0,
2737 on_counterparty_tx_dust_exposure_msat: 0,
2738 on_holder_tx_dust_exposure_msat: 0,
2739 holding_cell_msat: 0,
2740 on_holder_tx_holding_cell_htlcs_count: 0,
2743 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2746 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
2747 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
2748 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
2750 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
2751 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
2752 for ref htlc in context.pending_outbound_htlcs.iter() {
2753 stats.pending_htlcs_value_msat += htlc.amount_msat;
2754 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
2755 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
2757 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
2758 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
2762 for update in context.holding_cell_htlc_updates.iter() {
2763 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
2764 stats.pending_htlcs += 1;
2765 stats.pending_htlcs_value_msat += amount_msat;
2766 stats.holding_cell_msat += amount_msat;
2767 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
2768 stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
2770 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
2771 stats.on_holder_tx_dust_exposure_msat += amount_msat;
2773 stats.on_holder_tx_holding_cell_htlcs_count += 1;
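	// Illustrative note (ours): an outbound HTLC is a *received* HTLC on the counterparty's
	// commitment (claimed via an HTLC-success transaction) and an *offered* HTLC on our own
	// (claimed via an HTLC-timeout transaction), which is why the success-weight buffer is added
	// to their dust limit and the timeout-weight buffer to ours. A minimal sketch of the two
	// thresholds, in sats, using hypothetical parameter names:
	#[allow(dead_code)]
	fn outbound_htlc_dust_thresholds_sketch(
		dust_buffer_feerate: u64, counterparty_dust_limit_sat: u64, holder_dust_limit_sat: u64,
		htlc_success_weight: u64, htlc_timeout_weight: u64,
	) -> (u64, u64) {
		// (threshold on the counterparty's commitment, threshold on the holder's commitment)
		(counterparty_dust_limit_sat + dust_buffer_feerate * htlc_success_weight / 1000,
		 holder_dust_limit_sat + dust_buffer_feerate * htlc_timeout_weight / 1000)
	}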
2780 /// Returns information on all pending inbound HTLCs.
2781 pub fn get_pending_inbound_htlc_details(&self) -> Vec<InboundHTLCDetails> {
2782 let mut holding_cell_states = new_hash_map();
2783 for holding_cell_update in self.holding_cell_htlc_updates.iter() {
2784 match holding_cell_update {
2785 HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2786 holding_cell_states.insert(
2788 InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill,
2791 HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2792 holding_cell_states.insert(
2794 InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail,
2797 HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } => {
2798 holding_cell_states.insert(
2800 InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail,
2804 HTLCUpdateAwaitingACK::AddHTLC { .. } => {},
2807 let mut inbound_details = Vec::new();
2808 let htlc_success_dust_limit = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2811 let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
2812 dust_buffer_feerate * htlc_success_tx_weight(self.get_channel_type()) / 1000
2814 let holder_dust_limit_success_sat = htlc_success_dust_limit + self.holder_dust_limit_satoshis;
2815 for htlc in self.pending_inbound_htlcs.iter() {
2816 if let Some(state_details) = (&htlc.state).into() {
2817 inbound_details.push(InboundHTLCDetails{
2818 htlc_id: htlc.htlc_id,
2819 amount_msat: htlc.amount_msat,
2820 cltv_expiry: htlc.cltv_expiry,
2821 payment_hash: htlc.payment_hash,
2822 state: Some(holding_cell_states.remove(&htlc.htlc_id).unwrap_or(state_details)),
2823 is_dust: htlc.amount_msat / 1000 < holder_dust_limit_success_sat,
2830 /// Returns information on all pending outbound HTLCs.
2831 pub fn get_pending_outbound_htlc_details(&self) -> Vec<OutboundHTLCDetails> {
2832 let mut outbound_details = Vec::new();
2833 let htlc_timeout_dust_limit = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2836 let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
2837 dust_buffer_feerate * htlc_success_tx_weight(self.get_channel_type()) / 1000
2839 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.holder_dust_limit_satoshis;
2840 for htlc in self.pending_outbound_htlcs.iter() {
2841 outbound_details.push(OutboundHTLCDetails{
2842 htlc_id: Some(htlc.htlc_id),
2843 amount_msat: htlc.amount_msat,
2844 cltv_expiry: htlc.cltv_expiry,
2845 payment_hash: htlc.payment_hash,
2846 skimmed_fee_msat: htlc.skimmed_fee_msat,
2847 state: Some((&htlc.state).into()),
2848 is_dust: htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat,
2851 for holding_cell_update in self.holding_cell_htlc_updates.iter() {
2852 if let HTLCUpdateAwaitingACK::AddHTLC {
2858 } = *holding_cell_update {
2859 outbound_details.push(OutboundHTLCDetails{
2861 amount_msat: amount_msat,
2862 cltv_expiry: cltv_expiry,
2863 payment_hash: payment_hash,
2864 skimmed_fee_msat: skimmed_fee_msat,
2865 state: Some(OutboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
2866 is_dust: amount_msat / 1000 < holder_dust_limit_timeout_sat,
2873 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
2874 /// Doesn't bother handling the
2875 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
2876 /// corner case properly.
2877 pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
2878 -> AvailableBalances
2879 where F::Target: FeeEstimator
2881 let context = &self;
2882 // Note that we have to handle overflow due to the above case.
2883 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
2884 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
2886 let mut balance_msat = context.value_to_self_msat;
2887 for ref htlc in context.pending_inbound_htlcs.iter() {
2888 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
2889 balance_msat += htlc.amount_msat;
2892 balance_msat -= outbound_stats.pending_htlcs_value_msat;
2894 let outbound_capacity_msat = context.value_to_self_msat
2895 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
2897 context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
2899 let mut available_capacity_msat = outbound_capacity_msat;
2901 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2902 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
2906 if context.is_outbound() {
2907 // We should mind channel commit tx fee when computing how much of the available capacity
2908 // can be used in the next htlc. Mirrors the logic in send_htlc.
2910 // The fee depends on whether the amount we will be sending is above dust or not,
2911 // and the answer will in turn change the amount itself — making it a circular
2913 // This complicates the computation around dust-values, up to the one-htlc-value.
2914 let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
2915 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2916 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
2919 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
2920 let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
2921 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
2922 let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
2923 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2924 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2925 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2928 // We will first subtract the fee as if we were above-dust. Then, if the resulting
2929 // value ends up being below dust, we have this fee available again. In that case,
2930 // match the value to right-below-dust.
2931 let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
2932 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
2933 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
2934 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
2935 debug_assert!(one_htlc_difference_msat != 0);
2936 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
2937 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
2938 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
2940 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
2943 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
2944 // sending a new HTLC won't reduce their balance below our reserve threshold.
2945 let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
2946 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2947 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
2950 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
2951 let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
2953 let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
2954 let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
2955 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
2957 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
2958 // If another HTLC's fee would reduce the remote's balance below the reserve limit
2959 // we've selected for them, we can only send dust HTLCs.
2960 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
2964 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
2966 // If we get close to our maximum dust exposure, we end up in a situation where we can send
2967 // between zero and the remaining dust exposure limit, OR above the dust limit.
2968 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
2969 // send above the dust limit (as the router can always overpay to meet the dust limit).
2970 let mut remaining_msat_below_dust_exposure_limit = None;
2971 let mut dust_exposure_dust_limit_msat = 0;
2972 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
2974 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2975 (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
2977 let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
2978 (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2979 context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2981 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
2982 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2983 remaining_msat_below_dust_exposure_limit =
2984 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
2985 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
2988 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
2989 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2990 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
2991 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
2992 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
2993 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
2996 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
2997 if available_capacity_msat < dust_exposure_dust_limit_msat {
2998 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
3000 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
3004 available_capacity_msat = cmp::min(available_capacity_msat,
3005 context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
3007 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
3008 available_capacity_msat = 0;
3012 inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
3013 - context.value_to_self_msat as i64
3014 - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
3015 - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
3017 outbound_capacity_msat,
3018 next_outbound_htlc_limit_msat: available_capacity_msat,
3019 next_outbound_htlc_minimum_msat,
3024 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
3025 let context = &self;
3026 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
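	// Illustrative sketch (ours) of the final clamping performed in `get_available_balances`
	// above: the next outbound HTLC is further limited by the counterparty's max-in-flight value
	// and drops to zero if one more HTLC would exceed their accepted-HTLC count. Parameter names
	// are hypothetical and saturating arithmetic is used for brevity.
	#[allow(dead_code)]
	fn clamp_next_outbound_htlc_limit_sketch(
		mut available_capacity_msat: u64, counterparty_max_in_flight_msat: u64,
		pending_outbound_value_msat: u64, pending_outbound_count: u32, counterparty_max_accepted_htlcs: u16,
	) -> u64 {
		available_capacity_msat = cmp::min(available_capacity_msat,
			counterparty_max_in_flight_msat.saturating_sub(pending_outbound_value_msat));
		if pending_outbound_count + 1 > counterparty_max_accepted_htlcs as u32 {
			available_capacity_msat = 0;
		}
		available_capacity_msat
	}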
3029 /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
3030 /// number of pending HTLCs that are on track to be in our next commitment tx.
3032 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
3033 /// `fee_spike_buffer_htlc` is `Some`.
3035 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
3036 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
3038 /// Dust HTLCs are excluded.
3039 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
3040 let context = &self;
3041 assert!(context.is_outbound());
3043 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3046 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
3047 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
3049 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
3050 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
3052 let mut addl_htlcs = 0;
3053 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
3055 HTLCInitiator::LocalOffered => {
3056 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
3060 HTLCInitiator::RemoteOffered => {
3061 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
3067 let mut included_htlcs = 0;
3068 for ref htlc in context.pending_inbound_htlcs.iter() {
3069 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
3072 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
3073 // transaction including this HTLC if it times out before they RAA.
3074 included_htlcs += 1;
3077 for ref htlc in context.pending_outbound_htlcs.iter() {
3078 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
3082 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
3083 OutboundHTLCState::Committed => included_htlcs += 1,
3084 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
3085 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
3086 // transaction won't be generated until they send us their next RAA, which will mean
3087 // dropping any HTLCs in this state.
3092 for htlc in context.holding_cell_htlc_updates.iter() {
3094 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
3095 if amount_msat / 1000 < real_dust_limit_timeout_sat {
3100 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
3101 // ack we're guaranteed to never include them in commitment txs anymore.
3105 let num_htlcs = included_htlcs + addl_htlcs;
3106 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
3107 #[cfg(any(test, fuzzing))]
3110 if fee_spike_buffer_htlc.is_some() {
3111 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
3113 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
3114 + context.holding_cell_htlc_updates.len();
3115 let commitment_tx_info = CommitmentTxInfoCached {
3117 total_pending_htlcs,
3118 next_holder_htlc_id: match htlc.origin {
3119 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
3120 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
3122 next_counterparty_htlc_id: match htlc.origin {
3123 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
3124 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
3126 feerate: context.feerate_per_kw,
3128 *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
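	// Illustrative sketch (ours): when `fee_spike_buffer_htlc` is `Some`, the fee computed above
	// includes one extra non-dust HTLC, leaving headroom so that a further non-dust HTLC can still
	// be accepted or added. The weights are parameters here purely for exposition; the real values
	// come from `commitment_tx_base_weight` and `COMMITMENT_TX_WEIGHT_PER_HTLC`.
	#[allow(dead_code)]
	fn fee_with_spike_buffer_sketch(feerate_per_kw: u32, non_dust_htlcs: u64, base_weight: u64, weight_per_htlc: u64) -> u64 {
		let weight = base_weight + (non_dust_htlcs + 1) * weight_per_htlc;
		// msat result, rounded down to a whole satoshi as in `commit_tx_fee_msat` below
		weight * feerate_per_kw as u64 / 1000 * 1000
	}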
3133 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
3134 /// pending HTLCs that are on track to be in their next commitment tx
3136 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
3137 /// `fee_spike_buffer_htlc` is `Some`.
3139 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
3140 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
3142 /// Dust HTLCs are excluded.
3143 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
3144 let context = &self;
3145 assert!(!context.is_outbound());
3147 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3150 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
3151 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
3153 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
3154 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
3156 let mut addl_htlcs = 0;
3157 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
3159 HTLCInitiator::LocalOffered => {
3160 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
3164 HTLCInitiator::RemoteOffered => {
3165 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
3171 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
3172 // non-dust inbound HTLCs are included (as all states imply it will be included) and only
3173 // committed outbound HTLCs, see below.
3174 let mut included_htlcs = 0;
3175 for ref htlc in context.pending_inbound_htlcs.iter() {
3176 if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
3179 included_htlcs += 1;
3182 for ref htlc in context.pending_outbound_htlcs.iter() {
3183 if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
3186 // We only include outbound HTLCs if it will not be included in their next commitment_signed,
3187 // i.e. if they've responded to us with an RAA after announcement.
3189 OutboundHTLCState::Committed => included_htlcs += 1,
3190 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
3191 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
3196 let num_htlcs = included_htlcs + addl_htlcs;
3197 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
3198 #[cfg(any(test, fuzzing))]
3201 if fee_spike_buffer_htlc.is_some() {
3202 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
3204 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
3205 let commitment_tx_info = CommitmentTxInfoCached {
3207 total_pending_htlcs,
3208 next_holder_htlc_id: match htlc.origin {
3209 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
3210 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
3212 next_counterparty_htlc_id: match htlc.origin {
3213 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
3214 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
3216 feerate: context.feerate_per_kw,
3218 *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
3223 fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O> where F: Fn() -> Option<O> {
3224 match self.channel_state {
3225 ChannelState::FundingNegotiated => f(),
3226 ChannelState::AwaitingChannelReady(flags) =>
3227 if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) ||
3228 flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into())
3238 /// Returns the transaction if there is a pending funding transaction that is yet to be
3240 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
3241 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
3244 /// Returns the transaction ID if there is a pending funding transaction that is yet to be
3246 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
3247 self.if_unbroadcasted_funding(||
3248 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
3252 /// Returns whether the channel is funded in a batch.
3253 pub fn is_batch_funding(&self) -> bool {
3254 self.is_batch_funding.is_some()
3257 /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
3259 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
3260 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
3263 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
3264 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
3265 /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
3266 /// Also returns the list of payment_hashes for channels which we can safely fail backwards
3267 /// immediately (others we will have to allow to time out).
3268 pub fn force_shutdown(&mut self, should_broadcast: bool, closure_reason: ClosureReason) -> ShutdownResult {
3269 // Note that we MUST only generate a monitor update that indicates force-closure - we're
3270 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
3271 // being fully configured in some cases. Thus, it's likely any monitor events we generate will
3272 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
3273 assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete));
3275 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
3276 // return them to fail the payment.
3277 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
3278 let counterparty_node_id = self.get_counterparty_node_id();
3279 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
3281 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
3282 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
3287 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
3288 // If we haven't yet exchanged funding signatures (ie channel_state < AwaitingChannelReady),
3289 // returning a channel monitor update here would imply a channel monitor update before
3290 // we even registered the channel monitor to begin with, which is invalid.
3291 // Thus, if we aren't actually at a point where we could conceivably broadcast the
3292 // funding transaction, don't return a funding txo (which prevents providing the
3293 // monitor update to the user, even if we return one).
3294 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
3295 if !self.channel_state.is_pre_funded_state() {
3296 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
3297 Some((self.get_counterparty_node_id(), funding_txo, self.channel_id(), ChannelMonitorUpdate {
3298 update_id: self.latest_monitor_update_id,
3299 counterparty_node_id: Some(self.counterparty_node_id),
3300 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
3301 channel_id: Some(self.channel_id()),
3305 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
3306 let unbroadcasted_funding_tx = self.unbroadcasted_funding();
3308 self.channel_state = ChannelState::ShutdownComplete;
3309 self.update_time_counter += 1;
3313 dropped_outbound_htlcs,
3314 unbroadcasted_batch_funding_txid,
3315 channel_id: self.channel_id,
3316 user_channel_id: self.user_id,
3317 channel_capacity_satoshis: self.channel_value_satoshis,
3318 counterparty_node_id: self.counterparty_node_id,
3319 unbroadcasted_funding_tx,
3320 channel_funding_txo: self.get_funding_txo(),
3324 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
3325 fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
3326 let counterparty_keys = self.build_remote_transaction_keys();
3327 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
3329 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
3330 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
3331 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
3332 &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
3334 match &self.holder_signer {
3335 // TODO (arik): move match into calling method for Taproot
3336 ChannelSignerType::Ecdsa(ecdsa) => {
3337 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
3338 .map(|(signature, _)| msgs::FundingSigned {
3339 channel_id: self.channel_id(),
3342 partial_signature_with_nonce: None,
3346 if funding_signed.is_none() {
3347 #[cfg(not(async_signing))] {
3348 panic!("Failed to get signature for funding_signed");
3350 #[cfg(async_signing)] {
3351 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
3352 self.signer_pending_funding = true;
3354 } else if self.signer_pending_funding {
3355 log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
3356 self.signer_pending_funding = false;
3359 // We sign the counterparty's commitment transaction, allowing them to broadcast the tx if they wish.
3360 (counterparty_initial_commitment_tx, funding_signed)
3362 // TODO (taproot|arik)
3368 /// If we receive an error message when attempting to open a channel, it may only be a rejection
3369 /// of the channel type we tried, not of our ability to open any channel at all. We can see if a
3370 /// downgrade of channel features would be possible so that we can still open the channel.
3371 pub(crate) fn maybe_downgrade_channel_features<F: Deref>(
3372 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>
3375 F::Target: FeeEstimator
3377 if !self.is_outbound() ||
3379 self.channel_state, ChannelState::NegotiatingFunding(flags)
3380 if flags == NegotiatingFundingFlags::OUR_INIT_SENT
3385 if self.channel_type == ChannelTypeFeatures::only_static_remote_key() {
3386 // We've exhausted our options
3389 // We support opening a few different types of channels. Try removing our additional
3390 // features one by one until we've either arrived at our default or the counterparty has
3393 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
3394 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
3395 // checks whether the counterparty supports every feature, this would only happen if the
3396 // counterparty is advertising the feature, but rejecting channels proposing the feature for
3398 if self.channel_type.supports_anchors_zero_fee_htlc_tx() {
3399 self.channel_type.clear_anchors_zero_fee_htlc_tx();
3400 self.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
3401 assert!(!self.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
3402 } else if self.channel_type.supports_scid_privacy() {
3403 self.channel_type.clear_scid_privacy();
3405 self.channel_type = ChannelTypeFeatures::only_static_remote_key();
3407 self.channel_transaction_parameters.channel_type_features = self.channel_type.clone();
3412 // Internal utility functions for channels
3414 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
3415 /// `channel_value_satoshis` in msat, set through
3416 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
3418 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
3420 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
3421 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
3422 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
3424 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
3427 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
3429 channel_value_satoshis * 10 * configured_percent
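// Worked check (illustrative only, not part of the original file): `sats * 10 * percent`
// equals `sats * 1000 * percent / 100`, so for a 1_000_000 sat channel with the configured
// percentage at 10, the in-flight cap is 100_000_000 msat, i.e. 10% of the channel value.
#[cfg(test)]
#[test]
fn holder_max_htlc_value_in_flight_sketch() {
	assert_eq!(1_000_000u64 * 10 * 10, 100_000_000);
	assert_eq!(1_000_000u64 * 1000 * 10 / 100, 100_000_000);
}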
3432 /// Returns a minimum channel reserve value the remote needs to maintain,
3433 /// required by us according to the configured or default
3434 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
3436 /// Guaranteed to return a value no larger than channel_value_satoshis
3438 /// This is used for both outbound and inbound channels and has a lower bound
3439 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
3440 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
3441 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
3442 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
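// Worked example (illustrative only): with `their_channel_reserve_proportional_millionths`
// set to 10_000 (1%), a 1_000_000 sat channel yields a proportional reserve of
// 1_000_000 * 10_000 / 1_000_000 = 10_000 sat; assuming the `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
// floor is at or below that, 10_000 sat is returned (and it is trivially capped by the
// channel value).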
3445 /// This is for legacy reasons, present for forward-compatibility.
3446 /// LDK versions older than 0.0.104 don't know how to read/handle values other than the default
3447 /// from storage. Hence, we use this function to not persist default values of
3448 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
3449 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
3450 let (q, _) = channel_value_satoshis.overflowing_div(100);
3451 cmp::min(channel_value_satoshis, cmp::max(q, 1000))
3454 /// Returns a minimum channel reserve value each party needs to maintain, fixed in the spec to a
3455 /// default of 1% of the total channel value.
3457 /// Guaranteed to return a value no larger than channel_value_satoshis
3459 /// This is used for both outbound and inbound channels and has a lower bound
3460 /// of `dust_limit_satoshis`.
3461 #[cfg(dual_funding)]
3462 fn get_v2_channel_reserve_satoshis(channel_value_satoshis: u64, dust_limit_satoshis: u64) -> u64 {
3463 // Fixed at 1% of channel value by spec.
3464 let (q, _) = channel_value_satoshis.overflowing_div(100);
3465 cmp::min(channel_value_satoshis, cmp::max(q, dust_limit_satoshis))
3468 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
3469 // Note that num_htlcs should not include dust HTLCs.
3471 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
3472 feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
3475 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
3476 // Note that num_htlcs should not include dust HTLCs.
3477 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
3478 // Note that we need to divide before multiplying to round properly,
3479 // since the lowest denomination of bitcoin on-chain is the satoshi.
3480 (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
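// Worked check (illustrative only): dividing before multiplying rounds the fee down to a whole
// satoshi. For a hypothetical 1_068 WU commitment at 253 sat/kW, 1_068 * 253 = 270_204, so the
// fee is 270 sat, i.e. 270_000 msat rather than 270_204 msat.
#[cfg(test)]
#[test]
fn commit_tx_fee_msat_rounding_sketch() {
	let weight: u64 = 1_068; // hypothetical total commitment weight
	let feerate_per_kw: u64 = 253;
	assert_eq!(weight * feerate_per_kw / 1000 * 1000, 270_000);
}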
3483 /// Context for dual-funded channels.
3484 #[cfg(dual_funding)]
3485 pub(super) struct DualFundingChannelContext {
3486 /// The amount in satoshis we will be contributing to the channel.
3487 pub our_funding_satoshis: u64,
3488 /// The amount in satoshis our counterparty will be contributing to the channel.
3489 pub their_funding_satoshis: u64,
3490 /// The funding transaction locktime suggested by the initiator. If set by us, it is always set
3491 /// to the current block height to align incentives against fee-sniping.
3492 pub funding_tx_locktime: u32,
3493 /// The feerate set by the initiator to be used for the funding transaction.
3494 pub funding_feerate_sat_per_1000_weight: u32,
3497 // Holder designates channel data owned for the benefit of the user client.
3498 // Counterparty designates channel data owned by the other channel participant.
3499 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
3500 pub context: ChannelContext<SP>,
3501 #[cfg(dual_funding)]
3502 pub dual_funding_channel_context: Option<DualFundingChannelContext>,
3505 #[cfg(any(test, fuzzing))]
3506 struct CommitmentTxInfoCached {
3508 total_pending_htlcs: usize,
3509 next_holder_htlc_id: u64,
3510 next_counterparty_htlc_id: u64,
3514 /// Contents of a wire message that fails an HTLC backwards. Useful for [`Channel::fail_htlc`] to
3515 /// fail with either [`msgs::UpdateFailMalformedHTLC`] or [`msgs::UpdateFailHTLC`] as needed.
3516 trait FailHTLCContents {
3517 type Message: FailHTLCMessageName;
3518 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message;
3519 fn to_inbound_htlc_state(self) -> InboundHTLCState;
3520 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK;
3522 impl FailHTLCContents for msgs::OnionErrorPacket {
3523 type Message = msgs::UpdateFailHTLC;
3524 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
3525 msgs::UpdateFailHTLC { htlc_id, channel_id, reason: self }
3527 fn to_inbound_htlc_state(self) -> InboundHTLCState {
3528 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(self))
3530 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
3531 HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet: self }
3534 impl FailHTLCContents for ([u8; 32], u16) {
3535 type Message = msgs::UpdateFailMalformedHTLC;
3536 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
3537 msgs::UpdateFailMalformedHTLC {
3540 sha256_of_onion: self.0,
3541 failure_code: self.1
3544 fn to_inbound_htlc_state(self) -> InboundHTLCState {
3545 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(self))
3547 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
3548 HTLCUpdateAwaitingACK::FailMalformedHTLC {
3550 sha256_of_onion: self.0,
3551 failure_code: self.1
3556 trait FailHTLCMessageName {
3557 fn name() -> &'static str;
3559 impl FailHTLCMessageName for msgs::UpdateFailHTLC {
3560 fn name() -> &'static str {
3564 impl FailHTLCMessageName for msgs::UpdateFailMalformedHTLC {
3565 fn name() -> &'static str {
3566 "update_fail_malformed_htlc"
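// Illustrative usage (ours, not from the original code): the same generic fail path can emit
// either wire message. For a malformed-HTLC failure the contents are just the onion hash and a
// failure code, e.g. (hypothetical values):
//   let msg = (sha256_of_onion, failure_code).to_message(htlc_id, channel_id);
// which produces an `msgs::UpdateFailMalformedHTLC`, while an `msgs::OnionErrorPacket` produces
// an `msgs::UpdateFailHTLC` via the same call.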
3570 impl<SP: Deref> Channel<SP> where
3571 SP::Target: SignerProvider,
3572 <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
3574 fn check_remote_fee<F: Deref, L: Deref>(
3575 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
3576 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
3577 ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
3579 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
3580 ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
3582 ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
3584 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
3585 if feerate_per_kw < lower_limit {
3586 if let Some(cur_feerate) = cur_feerate_per_kw {
3587 if feerate_per_kw > cur_feerate {
3589 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
3590 cur_feerate, feerate_per_kw);
3594 return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
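	// Illustrative sketch (ours) of the acceptance rule above: a remote feerate below our lower
	// bound is still accepted when it is at least moving up from the feerate we already have.
	#[allow(dead_code)]
	fn remote_fee_acceptable_sketch(feerate_per_kw: u32, lower_limit: u32, cur_feerate_per_kw: Option<u32>) -> bool {
		if feerate_per_kw >= lower_limit { return true; }
		matches!(cur_feerate_per_kw, Some(cur_feerate) if feerate_per_kw > cur_feerate)
	}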
3600 fn get_closing_scriptpubkey(&self) -> ScriptBuf {
3601 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
3602 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
3603 // outside of those situations will panic.
3604 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
3608 fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
3613 1 + // script length (0)
3617 )*4 + // * 4 for non-witness parts
3618 2 + // witness marker and flag
3619 1 + // witness element count
3620 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
3621 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
3622 2*(1 + 71); // two signatures + sighash type flags
3623 if let Some(spk) = a_scriptpubkey {
3624 ret += ((8+1) + // output values and script length
3625 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
3627 if let Some(spk) = b_scriptpubkey {
3628 ret += ((8+1) + // output values and script length
3629 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
3635 fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
3636 assert!(self.context.pending_inbound_htlcs.is_empty());
3637 assert!(self.context.pending_outbound_htlcs.is_empty());
3638 assert!(self.context.pending_update_fee.is_none());
3640 let mut total_fee_satoshis = proposed_total_fee_satoshis;
3641 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
3642 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
3644 if value_to_holder < 0 {
3645 assert!(self.context.is_outbound());
3646 total_fee_satoshis += (-value_to_holder) as u64;
3647 } else if value_to_counterparty < 0 {
3648 assert!(!self.context.is_outbound());
3649 total_fee_satoshis += (-value_to_counterparty) as u64;
3652 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
3653 value_to_counterparty = 0;
3656 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
3657 value_to_holder = 0;
3660 assert!(self.context.shutdown_scriptpubkey.is_some());
3661 let holder_shutdown_script = self.get_closing_scriptpubkey();
3662 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
3663 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
3665 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
3666 (closing_transaction, total_fee_satoshis)
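	// Illustrative sketch (ours) of the initial balance split in `build_closing_transaction`
	// above, before its negative-value and dust adjustments: the closing fee is paid entirely by
	// the channel funder. Parameter names are hypothetical.
	#[allow(dead_code)]
	fn closing_balances_sketch(value_to_self_msat: u64, channel_value_satoshis: u64, total_fee_satoshis: u64, holder_is_funder: bool) -> (i64, i64) {
		let to_holder = (value_to_self_msat as i64) / 1000
			- if holder_is_funder { total_fee_satoshis as i64 } else { 0 };
		let to_counterparty = ((channel_value_satoshis * 1000 - value_to_self_msat) as i64) / 1000
			- if holder_is_funder { 0 } else { total_fee_satoshis as i64 };
		(to_holder, to_counterparty)
	}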
3669 fn funding_outpoint(&self) -> OutPoint {
3670 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
3673 /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
3676 /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
3677 /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
3679 /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
3681 pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
3682 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
3683 where L::Target: Logger {
3684 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
3685 // (see equivalent if condition there).
3686 assert!(!self.context.channel_state.can_generate_new_commitment());
3687 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
3688 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
3689 self.context.latest_monitor_update_id = mon_update_id;
3690 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
3691 assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
3695 fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
3696 // Either ChannelReady got set (which means it won't be unset) or there is no way any
3697 // caller thought we could have something claimed (because we wouldn't have accepted an
3698 // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
3700 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3701 panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
3704 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
3705 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
3706 // these, but for now we just have to treat them as normal.
3708 let mut pending_idx = core::usize::MAX;
3709 let mut htlc_value_msat = 0;
3710 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
3711 if htlc.htlc_id == htlc_id_arg {
3712 debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
3713 log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
3714 htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
3716 InboundHTLCState::Committed => {},
3717 InboundHTLCState::LocalRemoved(ref reason) => {
3718 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3720 log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
3721 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
3723 return UpdateFulfillFetch::DuplicateClaim {};
3726 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
3727 // Don't return in release mode here so that we can update channel_monitor
3731 htlc_value_msat = htlc.amount_msat;
3735 if pending_idx == core::usize::MAX {
3736 #[cfg(any(test, fuzzing))]
3737 // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
3738 // this is simply a duplicate claim, not previously failed and we lost funds.
3739 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
3740 return UpdateFulfillFetch::DuplicateClaim {};
3743 // Now update local state:
3745 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
3746 // can claim it even if the channel hits the chain before we see their next commitment.
3747 self.context.latest_monitor_update_id += 1;
3748 let monitor_update = ChannelMonitorUpdate {
3749 update_id: self.context.latest_monitor_update_id,
3750 counterparty_node_id: Some(self.context.counterparty_node_id),
3751 updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
3752 payment_preimage: payment_preimage_arg.clone(),
3754 channel_id: Some(self.context.channel_id()),
3757 if !self.context.channel_state.can_generate_new_commitment() {
3758 // Note that this condition is the same as the assertion in
3759 // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
3760 // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
3761 // do not get into this branch.
3762 for pending_update in self.context.holding_cell_htlc_updates.iter() {
3763 match pending_update {
3764 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
3765 if htlc_id_arg == htlc_id {
3766 // Make sure we don't leave latest_monitor_update_id incremented here:
3767 self.context.latest_monitor_update_id -= 1;
3768 #[cfg(any(test, fuzzing))]
3769 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
3770 return UpdateFulfillFetch::DuplicateClaim {};
3773 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
3774 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
3776 if htlc_id_arg == htlc_id {
3777 log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
3778 // TODO: We may actually be able to switch to a fulfill here, though it's
3779 // rare enough it may not be worth the complexity burden.
3780 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
3781 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
3787 log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32());
3788 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
3789 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
3791 #[cfg(any(test, fuzzing))]
3792 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
3793 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
3795 #[cfg(any(test, fuzzing))]
3796 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
3799 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
3800 if let InboundHTLCState::Committed = htlc.state {
3802 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
3803 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
3805 log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
3806 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
3809 UpdateFulfillFetch::NewClaim {
3812 msg: Some(msgs::UpdateFulfillHTLC {
3813 channel_id: self.context.channel_id(),
3814 htlc_id: htlc_id_arg,
3815 payment_preimage: payment_preimage_arg,
3820 pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
3821 let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
3822 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
3823 UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
3824 // Even if we aren't supposed to let new monitor updates with commitment state
3825 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
3826 // matter what. Sadly, to push a new monitor update which flies before others
3827 // already queued, we have to insert it into the pending queue and update the
3828 // update_ids of all the following monitors.
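// As a purely illustrative sketch of the renumbering below: if `blocked_monitor_updates`
// currently holds updates with ids [5, 6] and we generate a new preimage + commitment
// update here, the new update takes id 5 (the first blocked id) and the held updates are
// shifted to [6, 7], keeping update ids strictly increasing when they are later released
// in order.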
3829 if release_cs_monitor && msg.is_some() {
3830 let mut additional_update = self.build_commitment_no_status_check(logger);
3831 // build_commitment_no_status_check may bump latest_monitor_id but we want them
3832 // to be strictly increasing by one, so decrement it here.
3833 self.context.latest_monitor_update_id = monitor_update.update_id;
3834 monitor_update.updates.append(&mut additional_update.updates);
3836 let new_mon_id = self.context.blocked_monitor_updates.get(0)
3837 .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
3838 monitor_update.update_id = new_mon_id;
3839 for held_update in self.context.blocked_monitor_updates.iter_mut() {
3840 held_update.update.update_id += 1;
3843 debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
3844 let update = self.build_commitment_no_status_check(logger);
3845 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
3851 self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
3852 UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
3854 UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
3858 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
3859 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
3860 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
3861 /// before we fail backwards.
3863 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(())`. Thus, this will always
3864 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
3865 /// [`ChannelError::Ignore`].
3866 pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
3867 -> Result<(), ChannelError> where L::Target: Logger {
3868 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
3869 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
3872 /// Used for failing back with [`msgs::UpdateFailMalformedHTLC`]. For now, this is used when we
3873 /// want to fail blinded HTLCs where we are not the intro node.
3875 /// See [`Self::queue_fail_htlc`] for more info.
3876 pub fn queue_fail_malformed_htlc<L: Deref>(
3877 &mut self, htlc_id_arg: u64, failure_code: u16, sha256_of_onion: [u8; 32], logger: &L
3878 ) -> Result<(), ChannelError> where L::Target: Logger {
3879 self.fail_htlc(htlc_id_arg, (sha256_of_onion, failure_code), true, logger)
3880 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
3883 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
3884 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
3885 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
3886 /// before we fail backwards.
3888 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
3889 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
3890 /// [`ChannelError::Ignore`].
3891 fn fail_htlc<L: Deref, E: FailHTLCContents + Clone>(
3892 &mut self, htlc_id_arg: u64, err_contents: E, mut force_holding_cell: bool,
3894 ) -> Result<Option<E::Message>, ChannelError> where L::Target: Logger {
3895 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3896 panic!("Was asked to fail an HTLC when channel was not in an operational state");
3899 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
3900 // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
3901 // these, but for now we just have to treat them as normal.
3903 let mut pending_idx = core::usize::MAX;
3904 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
3905 if htlc.htlc_id == htlc_id_arg {
3907 InboundHTLCState::Committed => {},
3908 InboundHTLCState::LocalRemoved(ref reason) => {
3909 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3911 debug_assert!(false, "Tried to fail an HTLC that was already failed");
3916 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
3917 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
3923 if pending_idx == core::usize::MAX {
3924 #[cfg(any(test, fuzzing))]
3925 // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
3926 // is simply a duplicate fail, not previously failed and we failed-back too early.
3927 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
3931 if !self.context.channel_state.can_generate_new_commitment() {
3932 debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
3933 force_holding_cell = true;
3936 // Now update local state:
3937 if force_holding_cell {
3938 for pending_update in self.context.holding_cell_htlc_updates.iter() {
3939 match pending_update {
3940 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
3941 if htlc_id_arg == htlc_id {
3942 #[cfg(any(test, fuzzing))]
3943 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
3947 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
3948 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
3950 if htlc_id_arg == htlc_id {
3951 debug_assert!(false, "Tried to fail an HTLC that was already failed");
3952 return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
3958 log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
3959 self.context.holding_cell_htlc_updates.push(err_contents.to_htlc_update_awaiting_ack(htlc_id_arg));
3963 log_trace!(logger, "Failing HTLC ID {} back with {} message in channel {}.", htlc_id_arg,
3964 E::Message::name(), &self.context.channel_id());
3966 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
3967 htlc.state = err_contents.clone().to_inbound_htlc_state();
3970 Ok(Some(err_contents.to_message(htlc_id_arg, self.context.channel_id())))
3973 // Message handlers:
3974 /// Updates the state of the channel to indicate that all channels in the batch have received
3975 /// funding_signed and persisted their monitors.
3976 /// The funding transaction is consequently allowed to be broadcast, and the channel can be
3977 /// treated as a non-batch channel going forward.
3978 pub fn set_batch_ready(&mut self) {
3979 self.context.is_batch_funding = None;
3980 self.context.channel_state.clear_waiting_for_batch();
3983 /// Unsets the existing funding information.
3985 /// This must only be used if the channel has not yet completed funding and has not been used.
3987 /// Further, the channel must be immediately shut down after this with a call to
3988 /// [`ChannelContext::force_shutdown`].
3989 pub fn unset_funding_info(&mut self, temporary_channel_id: ChannelId) {
3990 debug_assert!(matches!(
3991 self.context.channel_state, ChannelState::AwaitingChannelReady(_)
3993 self.context.channel_transaction_parameters.funding_outpoint = None;
3994 self.context.channel_id = temporary_channel_id;
3997 /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
3998 /// and the channel is now usable (and public), this may generate an announcement_signatures to send to our peer.
4000 pub fn channel_ready<NS: Deref, L: Deref>(
4001 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
4002 user_config: &UserConfig, best_block: &BestBlock, logger: &L
4003 ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
4005 NS::Target: NodeSigner,
4008 if self.context.channel_state.is_peer_disconnected() {
4009 self.context.workaround_lnd_bug_4006 = Some(msg.clone());
4010 return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
4013 if let Some(scid_alias) = msg.short_channel_id_alias {
4014 if Some(scid_alias) != self.context.short_channel_id {
4015 // The scid alias provided can be used to route payments *from* our counterparty,
4016 // i.e. can be used for inbound payments and provided in invoices, but is not used
4017 // when routing outbound payments.
4018 self.context.latest_inbound_scid_alias = Some(scid_alias);
4022 // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
4023 // batch, but we can receive channel_ready messages.
4024 let mut check_reconnection = false;
4025 match &self.context.channel_state {
4026 ChannelState::AwaitingChannelReady(flags) => {
4027 let flags = flags.clone().clear(FundedStateFlags::ALL.into());
4028 debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
4029 if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
4030 // If we reconnected before sending our `channel_ready` they may still resend theirs.
4031 check_reconnection = true;
4032 } else if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
4033 self.context.channel_state.set_their_channel_ready();
4034 } else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
4035 self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
4036 self.context.update_time_counter += 1;
4038 // We're in `WAITING_FOR_BATCH`, so we should wait until we're ready.
4039 debug_assert!(flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
4042 // If we reconnected before sending our `channel_ready` they may still resend theirs.
4043 ChannelState::ChannelReady(_) => check_reconnection = true,
4044 _ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
4046 if check_reconnection {
4047 // They probably disconnected/reconnected and re-sent the channel_ready, which is
4048 // required, or they're sending a fresh SCID alias.
4049 let expected_point =
4050 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
4051 // If they haven't ever sent an updated point, the point they send should match the current one.
4053 self.context.counterparty_cur_commitment_point
4054 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
4055 // If we've advanced the commitment number once, the second commitment point is
4056 // at `counterparty_prev_commitment_point`, which is not yet revoked.
4057 debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
4058 self.context.counterparty_prev_commitment_point
4060 // If they have sent updated points, channel_ready is always supposed to match
4061 // their "first" point, which we re-derive here.
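// (Per BOLT 3, a per-commitment point is just the secp256k1 point for the corresponding
// per-commitment secret, i.e. `secret * G`, so re-deriving it from the stored secret must
// reproduce the point the peer sent in their original `channel_ready`.)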
4062 Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
4063 &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
4064 ).expect("We already advanced, so previous secret keys should have been validated already")))
4066 if expected_point != Some(msg.next_per_commitment_point) {
4067 return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
4072 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
4073 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
4075 log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
4077 Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
4080 pub fn update_add_htlc<F, FE: Deref, L: Deref>(
4081 &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
4082 create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
4083 ) -> Result<(), ChannelError>
4084 where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
4085 FE::Target: FeeEstimator, L::Target: Logger,
4087 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4088 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
4090 // We can't accept HTLCs sent after we've sent a shutdown.
4091 if self.context.channel_state.is_local_shutdown_sent() {
4092 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
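// (0x4000|8 above is BOLT 4's PERM|8 `permanent_channel_failure`; the 0x1000|7 used
// further below is UPDATE|7 `temporary_channel_failure`.)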
4094 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
4095 if self.context.channel_state.is_remote_shutdown_sent() {
4096 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
4098 if self.context.channel_state.is_peer_disconnected() {
4099 return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
4101 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
4102 return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
4104 if msg.amount_msat == 0 {
4105 return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
4107 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
4108 return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
4111 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
4112 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
4113 if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
4114 return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
4116 if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
4117 return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
4120 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
4121 // the reserve_satoshis we told them to always have as direct payment so that they lose
4122 // something if we punish them for broadcasting an old state).
4123 // Note that we don't really care about having a small/no to_remote output in our local
4124 // commitment transactions, as the purpose of the channel reserve is to ensure we can
4125 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
4126 // present in the next commitment transaction we send them (at least for fulfilled ones,
4127 // failed ones won't modify value_to_self).
4128 // Note that we will send HTLCs which another instance of rust-lightning would think
4129 // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
4130 // Channel state once they will not be present in the next received commitment transaction).
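// Concretely, an outbound HTLC which the counterparty has fulfilled but which has not yet
// been irrevocably removed is already, economically, theirs, so we subtract it from our
// balance below before checking whether they can still afford this HTLC, the commitment
// fee, and their reserve.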
4132 let mut removed_outbound_total_msat = 0;
4133 for ref htlc in self.context.pending_outbound_htlcs.iter() {
4134 if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
4135 removed_outbound_total_msat += htlc.amount_msat;
4136 } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
4137 removed_outbound_total_msat += htlc.amount_msat;
4141 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4142 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
4145 let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
4146 (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
4147 dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
4149 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
4150 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
4151 let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
4152 if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
4153 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
4154 on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
4155 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
4159 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
4160 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
4161 let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
4162 if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
4163 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
4164 on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
4165 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
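// For intuition, with purely illustrative numbers: on a non-anchor channel with a dust
// buffer feerate of 2_500 sat/kWU and 546 sat dust limits on both sides, the thresholds
// above work out to 2_500 * 663 / 1000 + 546 = 2_203 sat (timeout side) and
// 2_500 * 703 / 1000 + 546 = 2_303 sat (success side), so any HTLC below those values
// counts towards the corresponding dust exposure total.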
4169 let pending_value_to_self_msat =
4170 self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
4171 let pending_remote_value_msat =
4172 self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
4173 if pending_remote_value_msat < msg.amount_msat {
4174 return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
4177 // Check that the remote can afford to pay for this HTLC on-chain at the current
4178 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
4180 let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
4181 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
4182 self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
4184 let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
4185 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
4189 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
4190 return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
4192 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
4193 return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
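// A purely illustrative walk through the two checks above: on a 1_000_000 sat non-anchor
// channel where our balance is 600_000_000 msat and nothing else is pending,
// pending_remote_value_msat is 400_000_000 msat. An incoming 50_000_000 msat HTLC at
// 2_500 sat/kW with one non-dust HTLC costs them roughly (724 + 172) * 2_500 / 1000 =
// 2_240 sat in commitment fees, so with a 10_000 sat holder-selected reserve both checks
// pass: 350_000_000 msat remains, comfortably above 2_240_000 msat + 10_000_000 msat.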
4197 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
4198 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
4202 if !self.context.is_outbound() {
4203 // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
4204 // the spec because the fee spike buffer requirement doesn't exist on the receiver's
4205 // side, only on the sender's. Note that with anchor outputs we are no longer as
4206 // sensitive to fee spikes, so the fee spike buffer multiplier below is only applied on non-anchor channels.
4207 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
4208 let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
4209 if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
4210 remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
4212 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
4213 // Note that if the pending_forward_status is not updated here, then it's because we're already failing
4214 // the HTLC, i.e. its status is already set to failing.
4215 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
4216 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
4219 // Check that they won't violate our local required channel reserve by adding this HTLC.
4220 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
4221 let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
4222 if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
4223 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
4226 if self.context.next_counterparty_htlc_id != msg.htlc_id {
4227 return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
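// As with Bitcoin's nLockTime, locktime values of 500_000_000 or more are interpreted as
// UNIX timestamps rather than block heights, so we reject such values for an HTLC's
// cltv_expiry.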
4229 if msg.cltv_expiry >= 500000000 {
4230 return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
4233 if self.context.channel_state.is_local_shutdown_sent() {
4234 if let PendingHTLCStatus::Forward(_) = pending_forward_status {
4235 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
4239 // Now update local state:
4240 self.context.next_counterparty_htlc_id += 1;
4241 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
4242 htlc_id: msg.htlc_id,
4243 amount_msat: msg.amount_msat,
4244 payment_hash: msg.payment_hash,
4245 cltv_expiry: msg.cltv_expiry,
4246 state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
4251 /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
4253 fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
4254 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
4255 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
4256 if htlc.htlc_id == htlc_id {
4257 let outcome = match check_preimage {
4258 None => fail_reason.into(),
4259 Some(payment_preimage) => {
4260 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
4261 if payment_hash != htlc.payment_hash {
4262 return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
4264 OutboundHTLCOutcome::Success(Some(payment_preimage))
4268 OutboundHTLCState::LocalAnnounced(_) =>
4269 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
4270 OutboundHTLCState::Committed => {
4271 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
4273 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
4274 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
4279 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
4282 pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64, Option<u64>), ChannelError> {
4283 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4284 return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
4286 if self.context.channel_state.is_peer_disconnected() {
4287 return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
4290 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat, htlc.skimmed_fee_msat))
4293 pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
4294 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4295 return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
4297 if self.context.channel_state.is_peer_disconnected() {
4298 return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
4301 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
4305 pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
4306 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4307 return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
4309 if self.context.channel_state.is_peer_disconnected() {
4310 return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
4313 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
4317 pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
4318 where L::Target: Logger
4320 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4321 return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
4323 if self.context.channel_state.is_peer_disconnected() {
4324 return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
4326 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
4327 return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
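// The counterparty's signature commits to our (holder) commitment transaction, which
// spends the 2-of-2 funding output, so we verify it against the funding redeemscript and
// the full channel value.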
4330 let funding_script = self.context.get_funding_redeemscript();
4332 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
4334 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
4335 let commitment_txid = {
4336 let trusted_tx = commitment_stats.tx.trust();
4337 let bitcoin_tx = trusted_tx.built_transaction();
4338 let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
4340 log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
4341 log_bytes!(msg.signature.serialize_compact()[..]),
4342 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
4343 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
4344 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
4345 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
4349 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
4351 // If our counterparty updated the channel fee in this commitment transaction, check that
4352 // they can actually afford the new fee now.
4353 let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
4354 update_state == FeeUpdateState::RemoteAnnounced
4357 debug_assert!(!self.context.is_outbound());
4358 let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
4359 if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
4360 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
4363 #[cfg(any(test, fuzzing))]
4365 if self.context.is_outbound() {
4366 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
4367 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
4368 if let Some(info) = projected_commit_tx_info {
4369 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
4370 + self.context.holding_cell_htlc_updates.len();
4371 if info.total_pending_htlcs == total_pending_htlcs
4372 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
4373 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
4374 && info.feerate == self.context.feerate_per_kw {
4375 assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
4381 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
4382 return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
4385 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
4386 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
4387 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
4388 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
4389 // backwards compatibility, we never use it in production. To provide test coverage, here,
4390 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
4391 #[allow(unused_assignments, unused_mut)]
4392 let mut separate_nondust_htlc_sources = false;
4393 #[cfg(all(feature = "std", any(test, fuzzing)))] {
4394 use core::hash::{BuildHasher, Hasher};
4395 // Get a random value using the only std API to do so - the DefaultHasher
4396 let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
4397 separate_nondust_htlc_sources = rand_val % 2 == 0;
4400 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
4401 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
4402 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
4403 if let Some(_) = htlc.transaction_output_index {
4404 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
4405 self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
4406 &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
4408 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
4409 let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
4410 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
4411 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
4412 log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
4413 encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
4414 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
4415 return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
4417 if !separate_nondust_htlc_sources {
4418 htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
4421 htlcs_and_sigs.push((htlc, None, source_opt.take()));
4423 if separate_nondust_htlc_sources {
4424 if let Some(source) = source_opt.take() {
4425 nondust_htlc_sources.push(source);
4428 debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
4431 let holder_commitment_tx = HolderCommitmentTransaction::new(
4432 commitment_stats.tx,
4434 msg.htlc_signatures.clone(),
4435 &self.context.get_holder_pubkeys().funding_pubkey,
4436 self.context.counterparty_funding_pubkey()
4439 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
4440 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
4442 // Update state now that we've passed all the can-fail calls...
4443 let mut need_commitment = false;
4444 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
4445 if *update_state == FeeUpdateState::RemoteAnnounced {
4446 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
4447 need_commitment = true;
4451 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
4452 let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
4453 Some(forward_info.clone())
4455 if let Some(forward_info) = new_forward {
4456 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
4457 &htlc.payment_hash, &self.context.channel_id);
4458 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
4459 need_commitment = true;
4462 let mut claimed_htlcs = Vec::new();
4463 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
4464 if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
4465 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
4466 &htlc.payment_hash, &self.context.channel_id);
4467 // Grab the preimage, if it exists, instead of cloning
4468 let mut reason = OutboundHTLCOutcome::Success(None);
4469 mem::swap(outcome, &mut reason);
4470 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
4471 // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
4472 // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
4473 // have a `Success(None)` reason. In this case we could forget some HTLC
4474 // claims, but such an upgrade is unlikely and including claimed HTLCs here
4475 // fixes a bug which the user was exposed to on 0.0.104 when they started the
4477 claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
4479 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
4480 need_commitment = true;
4484 self.context.latest_monitor_update_id += 1;
4485 let mut monitor_update = ChannelMonitorUpdate {
4486 update_id: self.context.latest_monitor_update_id,
4487 counterparty_node_id: Some(self.context.counterparty_node_id),
4488 updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
4489 commitment_tx: holder_commitment_tx,
4490 htlc_outputs: htlcs_and_sigs,
4492 nondust_htlc_sources,
4494 channel_id: Some(self.context.channel_id()),
4497 self.context.cur_holder_commitment_transaction_number -= 1;
4498 self.context.expecting_peer_commitment_signed = false;
4499 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
4500 // build_commitment_no_status_check() next which will reset this to RAAFirst.
4501 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
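// (RAACommitmentOrder records which of our revoke_and_ack / commitment_signed should be
// retransmitted first if we later have to re-send both, e.g. after a reconnect or once a
// pending monitor update completes.)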
4503 if self.context.channel_state.is_monitor_update_in_progress() {
4504 // In case we initially failed monitor updating without requiring a response, we need
4505 // to make sure the RAA gets sent first.
4506 self.context.monitor_pending_revoke_and_ack = true;
4507 if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
4508 // If we were going to send a commitment_signed after the RAA, go ahead and do all
4509 // the corresponding HTLC status updates so that
4510 // get_last_commitment_update_for_send includes the right HTLCs.
4511 self.context.monitor_pending_commitment_signed = true;
4512 let mut additional_update = self.build_commitment_no_status_check(logger);
4513 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
4514 // strictly increasing by one, so decrement it here.
4515 self.context.latest_monitor_update_id = monitor_update.update_id;
4516 monitor_update.updates.append(&mut additional_update.updates);
4518 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
4519 &self.context.channel_id);
4520 return Ok(self.push_ret_blockable_mon_update(monitor_update));
4523 let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
4524 // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
4525 // we'll send one right away when we get the revoke_and_ack, via
4526 // free_holding_cell_htlcs().
4527 let mut additional_update = self.build_commitment_no_status_check(logger);
4528 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
4529 // strictly increasing by one, so decrement it here.
4530 self.context.latest_monitor_update_id = monitor_update.update_id;
4531 monitor_update.updates.append(&mut additional_update.updates);
4535 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
4536 &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
4537 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
4538 return Ok(self.push_ret_blockable_mon_update(monitor_update));
4541 /// Public version of the below, checking relevant preconditions first.
4542 /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
4543 /// returns `(None, Vec::new())`.
4544 pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
4545 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
4546 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
4547 where F::Target: FeeEstimator, L::Target: Logger
4549 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.channel_state.can_generate_new_commitment() {
4550 self.free_holding_cell_htlcs(fee_estimator, logger)
4551 } else { (None, Vec::new()) }
4554 /// Frees any pending commitment updates in the holding cell, generating the relevant messages
4555 /// for our counterparty.
4556 fn free_holding_cell_htlcs<F: Deref, L: Deref>(
4557 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
4558 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
4559 where F::Target: FeeEstimator, L::Target: Logger
4561 assert!(!self.context.channel_state.is_monitor_update_in_progress());
4562 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
4563 log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
4564 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
4566 let mut monitor_update = ChannelMonitorUpdate {
4567 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
4568 counterparty_node_id: Some(self.context.counterparty_node_id),
4569 updates: Vec::new(),
4570 channel_id: Some(self.context.channel_id()),
4573 let mut htlc_updates = Vec::new();
4574 mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
4575 let mut update_add_count = 0;
4576 let mut update_fulfill_count = 0;
4577 let mut update_fail_count = 0;
4578 let mut htlcs_to_fail = Vec::new();
4579 for htlc_update in htlc_updates.drain(..) {
4580 // Note that this *can* fail, though it should be due to rather-rare conditions on
4581 // fee races with adding too many outputs which push our total payments just over
4582 // the limit. In case it's less rare than I anticipate, we may want to revisit
4583 // handling this case better and maybe fulfilling some of the HTLCs while attempting
4584 // to rebalance channels.
4585 let fail_htlc_res = match &htlc_update {
4586 &HTLCUpdateAwaitingACK::AddHTLC {
4587 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
4588 skimmed_fee_msat, blinding_point, ..
4590 match self.send_htlc(
4591 amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
4592 false, skimmed_fee_msat, blinding_point, fee_estimator, logger
4594 Ok(_) => update_add_count += 1,
4597 ChannelError::Ignore(ref msg) => {
4598 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
4599 // If we fail to send here, then this HTLC should
4600 // be failed backwards. Failing to send here
4601 // indicates that this HTLC may keep being put back
4602 // into the holding cell without ever being
4603 // successfully forwarded/failed/fulfilled, causing
4604 // our counterparty to eventually close on us.
4605 htlcs_to_fail.push((source.clone(), *payment_hash));
4608 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
4615 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
4616 // If an HTLC claim was previously added to the holding cell (via
4617 // `get_update_fulfill_htlc`), then generating the claim message itself must
4618 // not fail - any in between attempts to claim the HTLC will have resulted
4619 // in it hitting the holding cell again and we cannot change the state of a
4620 // holding cell HTLC from fulfill to anything else.
4621 let mut additional_monitor_update =
4622 if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
4623 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
4624 { monitor_update } else { unreachable!() };
4625 update_fulfill_count += 1;
4626 monitor_update.updates.append(&mut additional_monitor_update.updates);
4629 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
4630 Some(self.fail_htlc(htlc_id, err_packet.clone(), false, logger)
4631 .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
4633 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
4634 Some(self.fail_htlc(htlc_id, (sha256_of_onion, failure_code), false, logger)
4635 .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
4638 if let Some(res) = fail_htlc_res {
4640 Ok(fail_msg_opt) => {
4641 // If an HTLC failure was previously added to the holding cell (via
4642 // `queue_fail_{malformed_}htlc`) then generating the fail message itself must
4643 // not fail - we should never end up in a state where we double-fail
4644 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
4645 // for a full revocation before failing.
4646 debug_assert!(fail_msg_opt.is_some());
4647 update_fail_count += 1;
4649 Err(ChannelError::Ignore(_)) => {},
4651 panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
4656 if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
4657 return (None, htlcs_to_fail);
4659 let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
4660 self.send_update_fee(feerate, false, fee_estimator, logger)
4665 let mut additional_update = self.build_commitment_no_status_check(logger);
4666 // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
4667 // but we want them to be strictly increasing by one, so reset it here.
4668 self.context.latest_monitor_update_id = monitor_update.update_id;
4669 monitor_update.updates.append(&mut additional_update.updates);
4671 log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
4672 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
4673 update_add_count, update_fulfill_count, update_fail_count);
4675 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
4676 (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
4682 /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
4683 /// commitment_signed message here in case we had pending outbound HTLCs to add which were
4684 /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
4685 /// generating an appropriate error *after* the channel state has been updated based on the
4686 /// revoke_and_ack message.
4687 pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
4688 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
4689 ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
4690 where F::Target: FeeEstimator, L::Target: Logger,
4692 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4693 return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
4695 if self.context.channel_state.is_peer_disconnected() {
4696 return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
4698 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
4699 return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
4702 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
4704 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
4705 if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
4706 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
4710 if !self.context.channel_state.is_awaiting_remote_revoke() {
4711 // Our counterparty seems to have burned their coins to us (by revoking a state when we
4712 // haven't given them a new commitment transaction to broadcast). We should probably
4713 // take advantage of this by updating our channel monitor, sending them an error, and
4714 // waiting for them to broadcast their latest (now-revoked) claim. But, that would be a
4715 // lot of work, and there's some chance this is all a misunderstanding anyway.
4716 // We have to do *something*, though, since our signer may get mad at us for otherwise
4717 // jumping a remote commitment number, so best to just force-close and move on.
4718 return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
4721 #[cfg(any(test, fuzzing))]
4723 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
4724 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
4727 match &self.context.holder_signer {
4728 ChannelSignerType::Ecdsa(ecdsa) => {
4729 ecdsa.validate_counterparty_revocation(
4730 self.context.cur_counterparty_commitment_transaction_number + 1,
4732 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
4734 // TODO (taproot|arik)
4739 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
4740 .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
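// (BOLT 3's compact secret-storage scheme means at most 49 (secret, index) pairs ever
// need to be kept; `provide_secret` above also checks that the newly revealed secret is
// consistent with, i.e. can re-derive, the secrets we already stored.)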
4741 self.context.latest_monitor_update_id += 1;
4742 let mut monitor_update = ChannelMonitorUpdate {
4743 update_id: self.context.latest_monitor_update_id,
4744 counterparty_node_id: Some(self.context.counterparty_node_id),
4745 updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
4746 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
4747 secret: msg.per_commitment_secret,
4749 channel_id: Some(self.context.channel_id()),
4752 // Update state now that we've passed all the can-fail calls...
4753 // (note that we may still fail to generate the new commitment_signed message, but that's
4754 // OK, we step the channel here and *then* if the new generation fails we can fail the
4755 // channel based on that, but stepping stuff here should be safe either way.)
4756 self.context.channel_state.clear_awaiting_remote_revoke();
4757 self.context.sent_message_awaiting_response = None;
4758 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
4759 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
4760 self.context.cur_counterparty_commitment_transaction_number -= 1;
4762 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
4763 self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
4766 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
4767 let mut to_forward_infos = Vec::new();
4768 let mut revoked_htlcs = Vec::new();
4769 let mut finalized_claimed_htlcs = Vec::new();
4770 let mut update_fail_htlcs = Vec::new();
4771 let mut update_fail_malformed_htlcs = Vec::new();
4772 let mut require_commitment = false;
4773 let mut value_to_self_msat_diff: i64 = 0;
4776 // Take references explicitly so that we can hold multiple references to self.context.
4777 let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
4778 let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
4779 let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;
4781 // We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
4782 pending_inbound_htlcs.retain(|htlc| {
4783 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
4784 log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
4785 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
4786 value_to_self_msat_diff += htlc.amount_msat as i64;
4788 *expecting_peer_commitment_signed = true;
4792 pending_outbound_htlcs.retain(|htlc| {
4793 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
4794 log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
4795 if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
4796 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
4798 finalized_claimed_htlcs.push(htlc.source.clone());
4799 // They fulfilled, so we sent them money
4800 value_to_self_msat_diff -= htlc.amount_msat as i64;
4805 for htlc in pending_inbound_htlcs.iter_mut() {
4806 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
4808 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
4812 let mut state = InboundHTLCState::Committed;
4813 mem::swap(&mut state, &mut htlc.state);
4815 if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
4816 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
4817 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
4818 require_commitment = true;
4819 } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
4820 match forward_info {
4821 PendingHTLCStatus::Fail(fail_msg) => {
4822 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
4823 require_commitment = true;
4825 HTLCFailureMsg::Relay(msg) => {
4826 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
4827 update_fail_htlcs.push(msg)
4829 HTLCFailureMsg::Malformed(msg) => {
4830 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
4831 update_fail_malformed_htlcs.push(msg)
4835 PendingHTLCStatus::Forward(forward_info) => {
4836 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
4837 to_forward_infos.push((forward_info, htlc.htlc_id));
4838 htlc.state = InboundHTLCState::Committed;
4844 for htlc in pending_outbound_htlcs.iter_mut() {
4845 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
4846 log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
4847 htlc.state = OutboundHTLCState::Committed;
4848 *expecting_peer_commitment_signed = true;
4850 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
4851 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
4852 // Grab the preimage, if it exists, instead of cloning
4853 let mut reason = OutboundHTLCOutcome::Success(None);
4854 mem::swap(outcome, &mut reason);
4855 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
4856 require_commitment = true;
4860 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
4862 if let Some((feerate, update_state)) = self.context.pending_update_fee {
4863 match update_state {
4864 FeeUpdateState::Outbound => {
4865 debug_assert!(self.context.is_outbound());
4866 log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
4867 self.context.feerate_per_kw = feerate;
4868 self.context.pending_update_fee = None;
4869 self.context.expecting_peer_commitment_signed = true;
4871 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
4872 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
4873 debug_assert!(!self.context.is_outbound());
4874 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
4875 require_commitment = true;
4876 self.context.feerate_per_kw = feerate;
4877 self.context.pending_update_fee = None;
4882 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
4883 let release_state_str =
4884 if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
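// The macro below centralizes the two ways this function returns successfully: if other
// monitor updates are currently blocked (or the caller asked us to hold this one), the new
// update is queued in `blocked_monitor_updates` and `None` is returned; otherwise the update
// is handed back to the caller for immediate persistence.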
4885 macro_rules! return_with_htlcs_to_fail {
4886 ($htlcs_to_fail: expr) => {
4887 if !release_monitor {
4888 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
4889 update: monitor_update,
4891 return Ok(($htlcs_to_fail, None));
4893 return Ok(($htlcs_to_fail, Some(monitor_update)));
4898 if self.context.channel_state.is_monitor_update_in_progress() {
4899 // We can't actually generate a new commitment transaction (incl by freeing holding
4900 // cells) while we can't update the monitor, so we just return what we have.
4901 if require_commitment {
4902 self.context.monitor_pending_commitment_signed = true;
4903 // When the monitor updating is restored we'll call
4904 // get_last_commitment_update_for_send(), which does not update state, but we're
4905 // definitely now awaiting a remote revoke before we can step forward any more, so set it here.
4907 let mut additional_update = self.build_commitment_no_status_check(logger);
4908 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
4909 // strictly increasing by one, so decrement it here.
4910 self.context.latest_monitor_update_id = monitor_update.update_id;
4911 monitor_update.updates.append(&mut additional_update.updates);
4913 self.context.monitor_pending_forwards.append(&mut to_forward_infos);
4914 self.context.monitor_pending_failures.append(&mut revoked_htlcs);
4915 self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
4916 log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
4917 return_with_htlcs_to_fail!(Vec::new());
4920 match self.free_holding_cell_htlcs(fee_estimator, logger) {
4921 (Some(mut additional_update), htlcs_to_fail) => {
4922 // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
4923 // strictly increasing by one, so decrement it here.
4924 self.context.latest_monitor_update_id = monitor_update.update_id;
4925 monitor_update.updates.append(&mut additional_update.updates);
4927 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
4928 &self.context.channel_id(), release_state_str);
4930 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
4931 return_with_htlcs_to_fail!(htlcs_to_fail);
4933 (None, htlcs_to_fail) => {
4934 if require_commitment {
4935 let mut additional_update = self.build_commitment_no_status_check(logger);
4937 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
4938 // strictly increasing by one, so decrement it here.
4939 self.context.latest_monitor_update_id = monitor_update.update_id;
4940 monitor_update.updates.append(&mut additional_update.updates);
4942 log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
4943 &self.context.channel_id(),
4944 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
4947 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
4948 return_with_htlcs_to_fail!(htlcs_to_fail);
4950 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
4951 &self.context.channel_id(), release_state_str);
4953 self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
4954 return_with_htlcs_to_fail!(htlcs_to_fail);
4960 /// Queues up an outbound update fee by placing it in the holding cell. You should call
4961 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
4962 /// commitment update.
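///
/// A rough usage sketch (not a doctest): assuming `chan` is a funded, outbound [`Channel`],
/// and `fee_est`/`logger` are a [`LowerBoundedFeeEstimator`] and [`Logger`] respectively (the
/// exact `maybe_free_holding_cell_htlcs` arguments are assumed from context):
/// ```ignore
/// // Queue the new feerate; nothing is sent to the peer yet.
/// chan.queue_update_fee(new_feerate_per_kw, &fee_est, &logger);
/// // Later, free the holding cell to actually build and send the commitment update.
/// let _ = chan.maybe_free_holding_cell_htlcs(&fee_est, &logger);
/// ```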
4963 pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
4964 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4965 where F::Target: FeeEstimator, L::Target: Logger
4967 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
4968 assert!(msg_opt.is_none(), "We forced holding cell?");
4971 /// Adds a pending update to this channel. See the doc for send_htlc for
4972 /// further details on the optionness of the return value.
4973 /// If our balance is too low to cover the cost of the next commitment transaction at the
4974 /// new feerate, the update is cancelled.
4976 /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
4977 /// [`Channel`] if `force_holding_cell` is false.
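///
/// Illustrative sketch of the affordability check performed below (names are the ones used in
/// this file; this is only a summary of the body, not additional behaviour):
/// ```ignore
/// let buffer_fee_msat = commit_tx_fee_sat(
/// 	feerate_per_kw,
/// 	num_nondust_htlcs + holding_cell_htlc_count + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize,
/// 	channel_type,
/// ) * 1000;
/// if holder_balance_msat < buffer_fee_msat + counterparty_selected_channel_reserve_satoshis * 1000 {
/// 	// Cannot afford the new feerate: the update is dropped and `None` is returned.
/// }
/// ```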
4978 fn send_update_fee<F: Deref, L: Deref>(
4979 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
4980 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
4981 ) -> Option<msgs::UpdateFee>
4982 where F::Target: FeeEstimator, L::Target: Logger
4984 if !self.context.is_outbound() {
4985 panic!("Cannot send fee from inbound channel");
4987 if !self.context.is_usable() {
4988 panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
4990 if !self.context.is_live() {
4991 panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
4994 // Before proposing a feerate update, check that we can actually afford the new fee.
4995 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
4996 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
4997 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
4998 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
4999 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
5000 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
5001 if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
5002 //TODO: auto-close after a number of failures?
5003 log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
5007 // Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
5008 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
5009 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
5010 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
5011 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
5012 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
5015 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
5016 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
5020 if self.context.channel_state.is_awaiting_remote_revoke() || self.context.channel_state.is_monitor_update_in_progress() {
5021 force_holding_cell = true;
5024 if force_holding_cell {
5025 self.context.holding_cell_update_fee = Some(feerate_per_kw);
5029 debug_assert!(self.context.pending_update_fee.is_none());
5030 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
5032 Some(msgs::UpdateFee {
5033 channel_id: self.context.channel_id,
5038 /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
5039 /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be resent.
5041 /// No further message handling calls may be made until a channel_reestablish dance has completed.
5043 /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
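///
/// Conceptually, the inbound-HTLC handling on disconnect reduces to (sketch only; state names
/// as used in this file):
/// ```ignore
/// pending_inbound_htlcs.retain(|htlc| match htlc.state {
/// 	// update_add_htlc received but never committed: drop it, the peer will re-send it.
/// 	InboundHTLCState::RemoteAnnounced(_) => false,
/// 	// Anything that made it into (or past) a signed commitment is kept.
/// 	_ => true,
/// });
/// ```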
5044 pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
5045 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
5046 if self.context.channel_state.is_pre_funded_state() {
5050 if self.context.channel_state.is_peer_disconnected() {
5051 // While the below code should be idempotent, it's simpler to just return early, as
5052 // redundant disconnect events can fire, though they should be rare.
5056 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
5057 self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
5060 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
5061 // will be retransmitted.
5062 self.context.last_sent_closing_fee = None;
5063 self.context.pending_counterparty_closing_signed = None;
5064 self.context.closing_fee_limits = None;
5066 let mut inbound_drop_count = 0;
5067 self.context.pending_inbound_htlcs.retain(|htlc| {
5069 InboundHTLCState::RemoteAnnounced(_) => {
5070 // They sent us an update_add_htlc but we never got the commitment_signed.
5071 // We'll tell them what commitment_signed we're expecting next and they'll drop
5072 // this HTLC accordingly
5073 inbound_drop_count += 1;
5076 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
5077 // We received a commitment_signed updating this HTLC and (at least hopefully)
5078 // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
5079 // in response to it yet, so don't touch it.
5082 InboundHTLCState::Committed => true,
5083 InboundHTLCState::LocalRemoved(_) => {
5084 // We (hopefully) sent a commitment_signed updating this HTLC (which we can
5085 // re-transmit if needed) and they may have even sent a revoke_and_ack back
5086 // (that we missed). Keep this around for now and if they tell us they missed
5087 // the commitment_signed we can re-transmit the update then.
5092 self.context.next_counterparty_htlc_id -= inbound_drop_count;
5094 if let Some((_, update_state)) = self.context.pending_update_fee {
5095 if update_state == FeeUpdateState::RemoteAnnounced {
5096 debug_assert!(!self.context.is_outbound());
5097 self.context.pending_update_fee = None;
5101 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
5102 if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
5103 // They sent us an update to remove this but haven't yet sent the corresponding
5104 // commitment_signed, we need to move it back to Committed and they can re-send
5105 // the update upon reconnection.
5106 htlc.state = OutboundHTLCState::Committed;
5110 self.context.sent_message_awaiting_response = None;
5112 self.context.channel_state.set_peer_disconnected();
5113 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
5117 /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
5118 /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
5119 /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
5120 /// update completes (potentially immediately).
5121 /// The messages which were generated with the monitor update must *not* have been sent to the
5122 /// remote end, and must instead have been dropped. They will be regenerated when
5123 /// [`Self::monitor_updating_restored`] is called.
5125 /// [`ChannelManager`]: super::channelmanager::ChannelManager
5126 /// [`chain::Watch`]: crate::chain::Watch
5127 /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
5128 fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
5129 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
5130 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
5131 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
5133 self.context.monitor_pending_revoke_and_ack |= resend_raa;
5134 self.context.monitor_pending_commitment_signed |= resend_commitment;
5135 self.context.monitor_pending_channel_ready |= resend_channel_ready;
5136 self.context.monitor_pending_forwards.append(&mut pending_forwards);
5137 self.context.monitor_pending_failures.append(&mut pending_fails);
5138 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
5139 self.context.channel_state.set_monitor_update_in_progress();
5142 /// Indicates that the latest ChannelMonitor update has been committed by the client
5143 /// successfully and we should restore normal operation. Returns messages which should be sent
5144 /// to the remote side.
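///
/// A caller is expected to forward the returned messages in the indicated order (rough sketch;
/// the actual message-sending steps are left as comments):
/// ```ignore
/// let updates = chan.monitor_updating_restored(&logger, &node_signer, chain_hash, &config, height);
/// match updates.order {
/// 	RAACommitmentOrder::RevokeAndACKFirst => { /* send `updates.raa`, then `updates.commitment_update` */ },
/// 	RAACommitmentOrder::CommitmentFirst => { /* send `updates.commitment_update`, then `updates.raa` */ },
/// }
/// ```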
5145 pub fn monitor_updating_restored<L: Deref, NS: Deref>(
5146 &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
5147 user_config: &UserConfig, best_block_height: u32
5148 ) -> MonitorRestoreUpdates
5151 NS::Target: NodeSigner
5153 assert!(self.context.channel_state.is_monitor_update_in_progress());
5154 self.context.channel_state.clear_monitor_update_in_progress();
5156 // If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to
5157 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
5158 // first received the funding_signed.
5159 let mut funding_broadcastable =
5160 if self.context.is_outbound() &&
5161 (matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
5162 matches!(self.context.channel_state, ChannelState::ChannelReady(_)))
5164 self.context.funding_transaction.take()
5166 // That said, if the funding transaction is already confirmed (ie we're active with a
5167 // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
5168 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.minimum_depth != Some(0) {
5169 funding_broadcastable = None;
5172 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
5173 // (and we assume the user never directly broadcasts the funding transaction and waits for
5174 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
5175 // * an inbound channel that failed to persist the monitor on funding_created and we got
5176 // the funding transaction confirmed before the monitor was persisted, or
5177 // * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
5178 let channel_ready = if self.context.monitor_pending_channel_ready {
5179 assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
5180 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
5181 self.context.monitor_pending_channel_ready = false;
5182 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
5183 Some(msgs::ChannelReady {
5184 channel_id: self.context.channel_id(),
5185 next_per_commitment_point,
5186 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5190 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
5192 let mut accepted_htlcs = Vec::new();
5193 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
5194 let mut failed_htlcs = Vec::new();
5195 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
5196 let mut finalized_claimed_htlcs = Vec::new();
5197 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
5199 if self.context.channel_state.is_peer_disconnected() {
5200 self.context.monitor_pending_revoke_and_ack = false;
5201 self.context.monitor_pending_commitment_signed = false;
5202 return MonitorRestoreUpdates {
5203 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
5204 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
5208 let raa = if self.context.monitor_pending_revoke_and_ack {
5209 Some(self.get_last_revoke_and_ack())
5211 let commitment_update = if self.context.monitor_pending_commitment_signed {
5212 self.get_last_commitment_update_for_send(logger).ok()
5214 if commitment_update.is_some() {
5215 self.mark_awaiting_response();
5218 self.context.monitor_pending_revoke_and_ack = false;
5219 self.context.monitor_pending_commitment_signed = false;
5220 let order = self.context.resend_order.clone();
5221 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
5222 &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
5223 if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
5224 match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
5225 MonitorRestoreUpdates {
5226 raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
5230 pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
5231 where F::Target: FeeEstimator, L::Target: Logger
5233 if self.context.is_outbound() {
5234 return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
5236 if self.context.channel_state.is_peer_disconnected() {
5237 return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
5239 Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
5241 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
5242 self.context.update_time_counter += 1;
5243 // Check that we won't be pushed over our dust exposure limit by the feerate increase.
5244 if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
5245 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
5246 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
5247 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
5248 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
5249 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
5250 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
5251 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
5252 msg.feerate_per_kw, holder_tx_dust_exposure)));
5254 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
5255 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
5256 msg.feerate_per_kw, counterparty_tx_dust_exposure)));
5262 /// Indicates that the signer may have some signatures for us, so we should retry if we're blocked.
5264 #[cfg(async_signing)]
5265 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
5266 let commitment_update = if self.context.signer_pending_commitment_update {
5267 self.get_last_commitment_update_for_send(logger).ok()
5269 let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
5270 self.context.get_funding_signed_msg(logger).1
5272 let channel_ready = if funding_signed.is_some() {
5273 self.check_get_channel_ready(0)
5276 log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
5277 if commitment_update.is_some() { "a" } else { "no" },
5278 if funding_signed.is_some() { "a" } else { "no" },
5279 if channel_ready.is_some() { "a" } else { "no" });
5281 SignerResumeUpdates {
5288 fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
5289 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
5290 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
5291 msgs::RevokeAndACK {
5292 channel_id: self.context.channel_id,
5293 per_commitment_secret,
5294 next_per_commitment_point,
5296 next_local_nonce: None,
5300 /// Gets the last commitment update for immediate sending to our peer.
5301 fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
5302 let mut update_add_htlcs = Vec::new();
5303 let mut update_fulfill_htlcs = Vec::new();
5304 let mut update_fail_htlcs = Vec::new();
5305 let mut update_fail_malformed_htlcs = Vec::new();
5307 for htlc in self.context.pending_outbound_htlcs.iter() {
5308 if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
5309 update_add_htlcs.push(msgs::UpdateAddHTLC {
5310 channel_id: self.context.channel_id(),
5311 htlc_id: htlc.htlc_id,
5312 amount_msat: htlc.amount_msat,
5313 payment_hash: htlc.payment_hash,
5314 cltv_expiry: htlc.cltv_expiry,
5315 onion_routing_packet: (**onion_packet).clone(),
5316 skimmed_fee_msat: htlc.skimmed_fee_msat,
5317 blinding_point: htlc.blinding_point,
5322 for htlc in self.context.pending_inbound_htlcs.iter() {
5323 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
5325 &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
5326 update_fail_htlcs.push(msgs::UpdateFailHTLC {
5327 channel_id: self.context.channel_id(),
5328 htlc_id: htlc.htlc_id,
5329 reason: err_packet.clone()
5332 &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
5333 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
5334 channel_id: self.context.channel_id(),
5335 htlc_id: htlc.htlc_id,
5336 sha256_of_onion: sha256_of_onion.clone(),
5337 failure_code: failure_code.clone(),
5340 &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
5341 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
5342 channel_id: self.context.channel_id(),
5343 htlc_id: htlc.htlc_id,
5344 payment_preimage: payment_preimage.clone(),
5351 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
5352 Some(msgs::UpdateFee {
5353 channel_id: self.context.channel_id(),
5354 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
5358 log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
5359 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
5360 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
5361 let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
5362 if self.context.signer_pending_commitment_update {
5363 log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
5364 self.context.signer_pending_commitment_update = false;
5368 #[cfg(not(async_signing))] {
5369 panic!("Failed to get signature for new commitment state");
5371 #[cfg(async_signing)] {
5372 if !self.context.signer_pending_commitment_update {
5373 log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
5374 self.context.signer_pending_commitment_update = true;
5379 Ok(msgs::CommitmentUpdate {
5380 update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
5385 /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
5386 pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
5387 if self.context.channel_state.is_local_shutdown_sent() {
5388 assert!(self.context.shutdown_scriptpubkey.is_some());
5389 Some(msgs::Shutdown {
5390 channel_id: self.context.channel_id,
5391 scriptpubkey: self.get_closing_scriptpubkey(),
5396 /// May panic if some calls other than message-handling calls (which will all Err immediately)
5397 /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
5399 /// Some links printed in log lines are included here to check them during build (when run with
5400 /// `cargo doc --document-private-items`):
5401 /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
5402 /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
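///
/// The core of the reconciliation compares the peer's claimed `next_remote_commitment_number`
/// against our own progress. Commitment numbers count *down* from `INITIAL_COMMITMENT_NUMBER`,
/// hence the subtraction (sketch of the logic used in the body):
/// ```ignore
/// let our_commitment_transaction =
/// 	INITIAL_COMMITMENT_NUMBER - cur_holder_commitment_transaction_number - 1;
/// if msg.next_remote_commitment_number == our_commitment_transaction {
/// 	// Peer is not waiting on any revoke_and_ack from us.
/// } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
/// 	// Peer missed our last revoke_and_ack: re-send it (or queue it behind a monitor update).
/// }
/// ```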
5403 pub fn channel_reestablish<L: Deref, NS: Deref>(
5404 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
5405 chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
5406 ) -> Result<ReestablishResponses, ChannelError>
5409 NS::Target: NodeSigner
5411 if !self.context.channel_state.is_peer_disconnected() {
5412 // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
5413 // almost certainly indicates we are going to end up out-of-sync in some way, so we
5414 // just close here instead of trying to recover.
5415 return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
5418 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
5419 msg.next_local_commitment_number == 0 {
5420 return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
5423 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
5424 if msg.next_remote_commitment_number > 0 {
5425 let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
5426 let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
5427 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
5428 if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
5429 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
5431 if msg.next_remote_commitment_number > our_commitment_transaction {
5432 macro_rules! log_and_panic {
5433 ($err_msg: expr) => {
5434 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
5435 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
5438 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
5439 This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
5440 More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
5441 If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
5442 ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
5443 ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
5444 Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
5445 See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
5449 // Before we change the state of the channel, we check if the peer is sending a very old
5450 // commitment transaction number; if so, we send a warning message.
5451 if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
5452 return Err(ChannelError::Warn(format!(
5453 "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
5454 msg.next_remote_commitment_number,
5455 our_commitment_transaction
5459 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
5460 // remaining cases either succeed or ErrorMessage-fail).
5461 self.context.channel_state.clear_peer_disconnected();
5462 self.context.sent_message_awaiting_response = None;
5464 let shutdown_msg = self.get_outbound_shutdown();
5466 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
5468 if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
5469 // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
5470 if !self.context.channel_state.is_our_channel_ready() ||
5471 self.context.channel_state.is_monitor_update_in_progress() {
5472 if msg.next_remote_commitment_number != 0 {
5473 return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
5475 // Short circuit the whole handler as there is nothing we can resend them
5476 return Ok(ReestablishResponses {
5477 channel_ready: None,
5478 raa: None, commitment_update: None,
5479 order: RAACommitmentOrder::CommitmentFirst,
5480 shutdown_msg, announcement_sigs,
5484 // We have OurChannelReady set!
5485 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
5486 return Ok(ReestablishResponses {
5487 channel_ready: Some(msgs::ChannelReady {
5488 channel_id: self.context.channel_id(),
5489 next_per_commitment_point,
5490 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5492 raa: None, commitment_update: None,
5493 order: RAACommitmentOrder::CommitmentFirst,
5494 shutdown_msg, announcement_sigs,
5498 let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
5499 // Remote isn't waiting on any RevokeAndACK from us!
5500 // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
5502 } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
5503 if self.context.channel_state.is_monitor_update_in_progress() {
5504 self.context.monitor_pending_revoke_and_ack = true;
5507 Some(self.get_last_revoke_and_ack())
5510 debug_assert!(false, "All values should have been handled in the four cases above");
5511 return Err(ChannelError::Close(format!(
5512 "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
5513 msg.next_remote_commitment_number,
5514 our_commitment_transaction
5518 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
5519 // revoke_and_ack, not on sending commitment_signed, so we add one if we have
5520 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
5521 // the corresponding revoke_and_ack back yet.
5522 let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke();
5523 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
5524 self.mark_awaiting_response();
5526 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
5528 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
5529 // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
5530 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
5531 Some(msgs::ChannelReady {
5532 channel_id: self.context.channel_id(),
5533 next_per_commitment_point,
5534 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5538 if msg.next_local_commitment_number == next_counterparty_commitment_number {
5539 if required_revoke.is_some() {
5540 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
5542 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
5545 Ok(ReestablishResponses {
5546 channel_ready, shutdown_msg, announcement_sigs,
5547 raa: required_revoke,
5548 commitment_update: None,
5549 order: self.context.resend_order.clone(),
5551 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
5552 if required_revoke.is_some() {
5553 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
5555 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
5558 if self.context.channel_state.is_monitor_update_in_progress() {
5559 self.context.monitor_pending_commitment_signed = true;
5560 Ok(ReestablishResponses {
5561 channel_ready, shutdown_msg, announcement_sigs,
5562 commitment_update: None, raa: None,
5563 order: self.context.resend_order.clone(),
5566 Ok(ReestablishResponses {
5567 channel_ready, shutdown_msg, announcement_sigs,
5568 raa: required_revoke,
5569 commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
5570 order: self.context.resend_order.clone(),
5573 } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
5574 Err(ChannelError::Close(format!(
5575 "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
5576 msg.next_local_commitment_number,
5577 next_counterparty_commitment_number,
5580 Err(ChannelError::Close(format!(
5581 "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
5582 msg.next_local_commitment_number,
5583 next_counterparty_commitment_number,
5588 /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
5589 /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
5590 /// at which point they will be recalculated.
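///
/// Rough shape of the computation, with made-up numbers (a 253 sat/kW minimum estimate, a
/// 2_000 sat/kW "normal" estimate and a ~700 weight-unit closing transaction); see the body
/// for the inbound/outbound and `target_closing_feerate_sats_per_kw` edge cases:
/// ```ignore
/// let proposed_total_fee_satoshis = 253u64 * 700 / 1000; // our minimum acceptable fee
/// let proposed_max_total_fee_satoshis = core::cmp::max(
/// 	2_000u64 * 700 / 1000 + force_close_avoidance_max_fee_satoshis,
/// 	proposed_max_feerate as u64 * 700 / 1000,
/// );
/// ```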
5591 fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
5593 where F::Target: FeeEstimator
5595 if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
5597 // Propose a range from our current Background feerate to our Normal feerate plus our
5598 // force_close_avoidance_max_fee_satoshis.
5599 // If we fail to come to consensus, we'll have to force-close.
5600 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
5601 // Use NonAnchorChannelFee because this should be an estimate for a channel close
5602 // that we don't expect to need fee bumping
5603 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
5604 let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
5606 // The spec requires that (when the channel does not have anchors) we only send absolute
5607 // channel fees no greater than the absolute channel fee on the current commitment
5608 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
5609 // very good reason to apply such a limit in any case. We don't bother doing so, risking
5610 // some force-closure by old nodes, but we wanted to close the channel anyway.
5612 if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
5613 let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
5614 proposed_feerate = cmp::max(proposed_feerate, min_feerate);
5615 proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
5618 // Note that technically we could end up with a lower minimum fee if one side's balance is
5619 // below our dust limit, causing the output to disappear. We don't bother handling this
5620 // case, however, as this should only happen if a channel is closed before any (material)
5621 // payments have been made on it. This may cause slight fee overpayment and/or failure to
5622 // come to consensus with our counterparty on appropriate fees, however it should be a
5623 // relatively rare case. We can revisit this later, though note that in order to determine
5624 // if the funder's output is dust we have to know the absolute fee we're going to use.
5625 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
5626 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
5627 let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
5628 // We always add force_close_avoidance_max_fee_satoshis to our normal
5629 // feerate-calculated fee, but allow the max to be overridden if we're using a
5630 // target feerate-calculated fee.
5631 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
5632 proposed_max_feerate as u64 * tx_weight / 1000)
5634 self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
5637 self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
5638 self.context.closing_fee_limits.clone().unwrap()
5641 /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
5642 /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
5643 /// this point if we're the funder we should send the initial closing_signed, and in any case
5644 /// shutdown should complete within a reasonable timeframe.
5645 fn closing_negotiation_ready(&self) -> bool {
5646 self.context.closing_negotiation_ready()
5649 /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
5650 /// an Err if no progress is being made and the channel should be force-closed instead.
5651 /// Should be called on a one-minute timer.
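///
/// Intended call pattern (sketch; `chan` is a hypothetical funded [`Channel`] binding):
/// ```ignore
/// // Roughly once per minute:
/// if chan.timer_check_closing_negotiation_progress().is_err() {
/// 	// closing_signed negotiation stalled for two ticks; force-close the channel.
/// }
/// ```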
5652 pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
5653 if self.closing_negotiation_ready() {
5654 if self.context.closing_signed_in_flight {
5655 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
5657 self.context.closing_signed_in_flight = true;
5663 pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
5664 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
5665 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
5666 where F::Target: FeeEstimator, L::Target: Logger
5668 // If we're waiting on a monitor persistence, that implies we're also waiting to send some
5669 // message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
5670 // initiate `closing_signed` negotiation until we're clear of all pending messages. Note
5671 // that closing_negotiation_ready checks this case (as well as a few others).
5672 if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
5673 return Ok((None, None, None));
5676 if !self.context.is_outbound() {
5677 if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
5678 return self.closing_signed(fee_estimator, &msg);
5680 return Ok((None, None, None));
5683 // If we're waiting on a counterparty `commitment_signed` to clear some updates from our
5684 // local commitment transaction, we can't yet initiate `closing_signed` negotiation.
5685 if self.context.expecting_peer_commitment_signed {
5686 return Ok((None, None, None));
5689 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
5691 assert!(self.context.shutdown_scriptpubkey.is_some());
5692 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
5693 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
5694 our_min_fee, our_max_fee, total_fee_satoshis);
5696 match &self.context.holder_signer {
5697 ChannelSignerType::Ecdsa(ecdsa) => {
5699 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
5700 .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
5702 self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
5703 Ok((Some(msgs::ClosingSigned {
5704 channel_id: self.context.channel_id,
5705 fee_satoshis: total_fee_satoshis,
5707 fee_range: Some(msgs::ClosingSignedFeeRange {
5708 min_fee_satoshis: our_min_fee,
5709 max_fee_satoshis: our_max_fee,
5713 // TODO (taproot|arik)
5719 // Marks a channel as waiting for a response from the counterparty. If it's not received
5720 // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt a reconnection.
5722 fn mark_awaiting_response(&mut self) {
5723 self.context.sent_message_awaiting_response = Some(0);
5726 /// Determines whether we should disconnect the counterparty due to not receiving a response
5727 /// within our expected timeframe.
5729 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
5730 pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
5731 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
5734 // Don't disconnect when we're not waiting on a response.
5737 *ticks_elapsed += 1;
5738 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
5742 &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
5743 ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
5745 if self.context.channel_state.is_peer_disconnected() {
5746 return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
5748 if self.context.channel_state.is_pre_funded_state() {
5749 // Spec says we should fail the connection, not the channel, but that's nonsense, there
5750 // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
5751 // can do that via error message without getting a connection fail anyway...
5752 return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
5754 for htlc in self.context.pending_inbound_htlcs.iter() {
5755 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
5756 return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
5759 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
5761 if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
5762 return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
5765 if self.context.counterparty_shutdown_scriptpubkey.is_some() {
5766 if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
5767 return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
5770 self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
5773 // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
5774 // immediately after the commitment dance, but we can send a Shutdown because we won't send
5775 // any further commitment updates after we set LocalShutdownSent.
5776 let send_shutdown = !self.context.channel_state.is_local_shutdown_sent();
5778 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
5781 assert!(send_shutdown);
5782 let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
5783 Ok(scriptpubkey) => scriptpubkey,
5784 Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
5786 if !shutdown_scriptpubkey.is_compatible(their_features) {
5787 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
5789 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
5794 // From here on out, we may not fail!
5796 self.context.channel_state.set_remote_shutdown_sent();
5797 self.context.update_time_counter += 1;
5799 let monitor_update = if update_shutdown_script {
5800 self.context.latest_monitor_update_id += 1;
5801 let monitor_update = ChannelMonitorUpdate {
5802 update_id: self.context.latest_monitor_update_id,
5803 counterparty_node_id: Some(self.context.counterparty_node_id),
5804 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
5805 scriptpubkey: self.get_closing_scriptpubkey(),
5807 channel_id: Some(self.context.channel_id()),
5809 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
5810 self.push_ret_blockable_mon_update(monitor_update)
5812 let shutdown = if send_shutdown {
5813 Some(msgs::Shutdown {
5814 channel_id: self.context.channel_id,
5815 scriptpubkey: self.get_closing_scriptpubkey(),
5819 // We can't send our shutdown until we've committed all of our pending HTLCs, but the
5820 // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
5821 // cell HTLCs and return them to fail the payment.
5822 self.context.holding_cell_update_fee = None;
5823 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
5824 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5826 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
5827 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
5834 self.context.channel_state.set_local_shutdown_sent();
5835 self.context.update_time_counter += 1;
5837 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
5840 fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
5841 let mut tx = closing_tx.trust().built_transaction().clone();
5843 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
5845 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
5846 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
5847 let mut holder_sig = sig.serialize_der().to_vec();
5848 holder_sig.push(EcdsaSighashType::All as u8);
5849 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
5850 cp_sig.push(EcdsaSighashType::All as u8);
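// The funding redeemscript orders the two funding pubkeys lexicographically (per BOLT 3), and
// OP_CHECKMULTISIG expects signatures in the same order as the keys appear in the script, so
// pick the witness order by comparing the serialized funding keys.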
5851 if funding_key[..] < counterparty_funding_key[..] {
5852 tx.input[0].witness.push(holder_sig);
5853 tx.input[0].witness.push(cp_sig);
5855 tx.input[0].witness.push(cp_sig);
5856 tx.input[0].witness.push(holder_sig);
5859 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
5863 pub fn closing_signed<F: Deref>(
5864 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
5865 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
5866 where F::Target: FeeEstimator
5868 if !self.context.channel_state.is_both_sides_shutdown() {
5869 return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
5871 if self.context.channel_state.is_peer_disconnected() {
5872 return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
5874 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
5875 return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
5877 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
5878 return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
5881 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
5882 return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
5885 if self.context.channel_state.is_monitor_update_in_progress() {
5886 self.context.pending_counterparty_closing_signed = Some(msg.clone());
5887 return Ok((None, None, None));
5890 let funding_redeemscript = self.context.get_funding_redeemscript();
5891 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
5892 if used_total_fee != msg.fee_satoshis {
5893 return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
5895 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
5897 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
5900 // The remote end may have decided to revoke their output due to inconsistent dust
5901 // limits, so check for that case by re-checking the signature here.
5902 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
5903 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
5904 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
5908 for outp in closing_tx.trust().built_transaction().output.iter() {
5909 if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
5910 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
5914 let closure_reason = if self.initiated_shutdown() {
5915 ClosureReason::LocallyInitiatedCooperativeClosure
5917 ClosureReason::CounterpartyInitiatedCooperativeClosure
5920 assert!(self.context.shutdown_scriptpubkey.is_some());
5921 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
5922 if last_fee == msg.fee_satoshis {
5923 let shutdown_result = ShutdownResult {
5925 monitor_update: None,
5926 dropped_outbound_htlcs: Vec::new(),
5927 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
5928 channel_id: self.context.channel_id,
5929 user_channel_id: self.context.user_id,
5930 channel_capacity_satoshis: self.context.channel_value_satoshis,
5931 counterparty_node_id: self.context.counterparty_node_id,
5932 unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
5933 channel_funding_txo: self.context.get_funding_txo(),
5935 let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
5936 self.context.channel_state = ChannelState::ShutdownComplete;
5937 self.context.update_time_counter += 1;
5938 return Ok((None, Some(tx), Some(shutdown_result)));
5942 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
5944 macro_rules! propose_fee {
5945 ($new_fee: expr) => {
5946 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
5947 (closing_tx, $new_fee)
5949 self.build_closing_transaction($new_fee, false)
5952 return match &self.context.holder_signer {
5953 ChannelSignerType::Ecdsa(ecdsa) => {
5955 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
5956 .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
5957 let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
5958 let shutdown_result = ShutdownResult {
5960 monitor_update: None,
5961 dropped_outbound_htlcs: Vec::new(),
5962 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
5963 channel_id: self.context.channel_id,
5964 user_channel_id: self.context.user_id,
5965 channel_capacity_satoshis: self.context.channel_value_satoshis,
5966 counterparty_node_id: self.context.counterparty_node_id,
5967 unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
5968 channel_funding_txo: self.context.get_funding_txo(),
5970 self.context.channel_state = ChannelState::ShutdownComplete;
5971 self.context.update_time_counter += 1;
5972 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
5973 (Some(tx), Some(shutdown_result))
5978 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
5979 Ok((Some(msgs::ClosingSigned {
5980 channel_id: self.context.channel_id,
5981 fee_satoshis: used_fee,
5983 fee_range: Some(msgs::ClosingSignedFeeRange {
5984 min_fee_satoshis: our_min_fee,
5985 max_fee_satoshis: our_max_fee,
5987 }), signed_tx, shutdown_result))
5989 // TODO (taproot|arik)
5996 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
5997 if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
5998 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
6000 if max_fee_satoshis < our_min_fee {
6001 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
6003 if min_fee_satoshis > our_max_fee {
6004 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
6007 if !self.context.is_outbound() {
6008 // They have to pay, so pick the highest fee in the overlapping range.
6009 // We should never set an upper bound aside from their full balance
6010 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
6011 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
6013 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
6014 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
6015 msg.fee_satoshis, our_min_fee, our_max_fee)));
6017 // The proposed fee is in our acceptable range, accept it and broadcast!
6018 propose_fee!(msg.fee_satoshis);
6021 // Old fee style negotiation. We don't bother to enforce whether they are complying
6022 // with the "making progress" requirements, we just comply and hope for the best.
6023 if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
6024 if msg.fee_satoshis > last_fee {
6025 if msg.fee_satoshis < our_max_fee {
6026 propose_fee!(msg.fee_satoshis);
6027 } else if last_fee < our_max_fee {
6028 propose_fee!(our_max_fee);
6030 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
6033 if msg.fee_satoshis > our_min_fee {
6034 propose_fee!(msg.fee_satoshis);
6035 } else if last_fee > our_min_fee {
6036 propose_fee!(our_min_fee);
6038 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
6042 if msg.fee_satoshis < our_min_fee {
6043 propose_fee!(our_min_fee);
6044 } else if msg.fee_satoshis > our_max_fee {
6045 propose_fee!(our_max_fee);
6047 propose_fee!(msg.fee_satoshis);
6053 fn internal_htlc_satisfies_config(
6054 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
6055 ) -> Result<(), (&'static str, u16)> {
6056 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
6057 .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
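// i.e. expected fee (msat) = forwarding_fee_base_msat + amt_to_forward * forwarding_fee_proportional_millionths / 1_000_000,
// computed with checked arithmetic so `fee` is `None` on overflow.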
6058 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
6059 (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
6061 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
6062 0x1000 | 12, // fee_insufficient
6065 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
6067 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
6068 0x1000 | 13, // incorrect_cltv_expiry
6074 /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
6075 /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
6076 /// unsuccessful, falls back to the previous one if one exists.
6077 pub fn htlc_satisfies_config(
6078 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
6079 ) -> Result<(), (&'static str, u16)> {
6080 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
6082 if let Some(prev_config) = self.context.prev_config() {
6083 self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
6090 pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
6091 self.context.cur_holder_commitment_transaction_number + 1
6094 pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
6095 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 }
6098 pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
6099 self.context.cur_counterparty_commitment_transaction_number + 2
6103 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
6104 &self.context.holder_signer
6108 pub fn get_value_stat(&self) -> ChannelValueStat {
6110 value_to_self_msat: self.context.value_to_self_msat,
6111 channel_value_msat: self.context.channel_value_satoshis * 1000,
6112 channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
6113 pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
6114 pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
6115 holding_cell_outbound_amount_msat: {
6117 for h in self.context.holding_cell_htlc_updates.iter() {
6119 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
6127 counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
6128 counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
6132 /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
6133 /// Allowed in any state (including after shutdown)
6134 pub fn is_awaiting_monitor_update(&self) -> bool {
6135 self.context.channel_state.is_monitor_update_in_progress()
6138 /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
6139 pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
6140 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
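// Blocked updates are queued in order, so the latest update already released to the user is the
// one whose `update_id` immediately precedes the first blocked update's.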
6141 self.context.blocked_monitor_updates[0].update.update_id - 1
6144 /// Returns the next blocked monitor update, if one exists, and a bool indicating whether a
6145 /// further blocked monitor update exists after the next.
6146 pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
6147 if self.context.blocked_monitor_updates.is_empty() { return None; }
6148 Some((self.context.blocked_monitor_updates.remove(0).update,
6149 !self.context.blocked_monitor_updates.is_empty()))
6152 /// Pushes a new monitor update into our monitor update queue, returning it if it should be
6153 /// immediately given to the user for persisting or `None` if it should be held as blocked.
6154 fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
6155 -> Option<ChannelMonitorUpdate> {
6156 let release_monitor = self.context.blocked_monitor_updates.is_empty();
6157 if !release_monitor {
6158 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
6167 pub fn blocked_monitor_updates_pending(&self) -> usize {
6168 self.context.blocked_monitor_updates.len()
6171 /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
6172 /// If the channel is outbound, this implies we have not yet broadcasted the funding
6173 /// transaction. If the channel is inbound, this implies simply that the channel has not
6175 pub fn is_awaiting_initial_mon_persist(&self) -> bool {
6176 if !self.is_awaiting_monitor_update() { return false; }
6178 self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
6179 if flags.clone().clear(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty()
6181 // If we're not a 0conf channel, we'll be waiting on a monitor update with only
6182 // AwaitingChannelReady set, though our peer could have sent their channel_ready.
6183 debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
6186 if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
6187 self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
6188 // If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
6189 // waiting for the initial monitor persistence. Thus, we check if our commitment
6190 // transaction numbers have both been iterated only exactly once (for the
6191 // funding_signed), and we're awaiting monitor update.
6193 // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
6194 // only way to get an awaiting-monitor-update state during initial funding is if the
6195 // initial monitor persistence is still pending).
6197 // Because deciding we're awaiting initial broadcast spuriously could result in
6198 // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
6199 // we hard-assert here, even in production builds.
6200 if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
6201 assert!(self.context.monitor_pending_channel_ready);
6202 assert_eq!(self.context.latest_monitor_update_id, 0);
6208 /// Returns true if our channel_ready has been sent
6209 pub fn is_our_channel_ready(&self) -> bool {
6210 matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) ||
6211 matches!(self.context.channel_state, ChannelState::ChannelReady(_))
6214 /// Returns true if our peer has either initiated or agreed to shut down the channel.
6215 pub fn received_shutdown(&self) -> bool {
6216 self.context.channel_state.is_remote_shutdown_sent()
6219 /// Returns true if we either initiated or agreed to shut down the channel.
6220 pub fn sent_shutdown(&self) -> bool {
6221 self.context.channel_state.is_local_shutdown_sent()
6224 /// Returns true if we initiated shutting down the channel.
6225 pub fn initiated_shutdown(&self) -> bool {
6226 self.context.local_initiated_shutdown.is_some()
6229 /// Returns true if this channel is fully shut down. True here implies that no further actions
6230 /// may/will be taken on this channel, and thus this object should be freed. Any future changes
6231 /// will be handled appropriately by the chain monitor.
6232 pub fn is_shutdown(&self) -> bool {
6233 matches!(self.context.channel_state, ChannelState::ShutdownComplete)
6236 pub fn channel_update_status(&self) -> ChannelUpdateStatus {
6237 self.context.channel_update_status
6240 pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
6241 self.context.update_time_counter += 1;
6242 self.context.channel_update_status = status;
6245 fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
6247 // * always when a new block/transactions are confirmed with the new height
6248 // * when funding is signed with a height of 0
6249 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
6253 let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
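// e.g. a funding tx confirmed at height 100 has 6 confirmations once the chain reaches height 105.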
6254 if funding_tx_confirmations <= 0 {
6255 self.context.funding_tx_confirmation_height = 0;
6258 if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
6262 // If we're still pending the signature on a funding transaction, then we're not ready to send a
6263 // channel_ready yet.
6264 if self.context.signer_pending_funding {
6268 // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
6269 // channel_ready until the entire batch is ready.
6270 let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()).is_empty()) {
6271 self.context.channel_state.set_our_channel_ready();
6273 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
6274 self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
6275 self.context.update_time_counter += 1;
6277 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
6278 // We got a reorg but not enough to trigger a force close, just ignore.
6281 if self.context.funding_tx_confirmation_height != 0 &&
6282 self.context.channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new())
6284 // We should never see a funding transaction on-chain until we've received
6285 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
6286 // an inbound channel - before that we have no known funding TXID). The fuzzer,
6287 // however, may do this and we shouldn't treat it as a bug.
6288 #[cfg(not(fuzzing))]
6289 panic!("Started confirming a channel in a state pre-AwaitingChannelReady: {}.\n\
6290 Do NOT broadcast a funding transaction manually - let LDK do it for you!",
6291 self.context.channel_state.to_u32());
6293 // We got a reorg but not enough to trigger a force close, just ignore.
6297 if need_commitment_update {
6298 if !self.context.channel_state.is_monitor_update_in_progress() {
6299 if !self.context.channel_state.is_peer_disconnected() {
6300 let next_per_commitment_point =
6301 self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
6302 return Some(msgs::ChannelReady {
6303 channel_id: self.context.channel_id,
6304 next_per_commitment_point,
6305 short_channel_id_alias: Some(self.context.outbound_scid_alias),
6309 self.context.monitor_pending_channel_ready = true;
6315 /// When a transaction is confirmed, we check whether it is or spends the funding transaction.
6316 /// In the first case, we store the confirmation height and calculate the short channel id.
6317 /// In the second, we simply return an Err indicating we need to be force-closed now.
6318 pub fn transactions_confirmed<NS: Deref, L: Deref>(
6319 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
6320 chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
6321 ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
6323 NS::Target: NodeSigner,
6326 let mut msgs = (None, None);
6327 if let Some(funding_txo) = self.context.get_funding_txo() {
6328 for &(index_in_block, tx) in txdata.iter() {
6329 // Check if the transaction is the expected funding transaction, and if it is,
6330 // check that it pays the right amount to the right script.
6331 if self.context.funding_tx_confirmation_height == 0 {
6332 if tx.txid() == funding_txo.txid {
6333 let txo_idx = funding_txo.index as usize;
6334 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
6335 tx.output[txo_idx].value != self.context.channel_value_satoshis {
6336 if self.context.is_outbound() {
6337 // If we generated the funding transaction and it doesn't match what it
6338 // should, the client is really broken and we should just panic and
6339 // tell them off. That said, because hash collisions happen with high
6340 // probability in fuzzing mode, if we're fuzzing we just close the
6341 // channel and move on.
6342 #[cfg(not(fuzzing))]
6343 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
6345 self.context.update_time_counter += 1;
6346 let err_reason = "funding tx had wrong script/value or output index";
6347 return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
6349 if self.context.is_outbound() {
6350 if !tx.is_coin_base() {
6351 for input in tx.input.iter() {
6352 if input.witness.is_empty() {
6353 // We generated a malleable funding transaction, implying we've
6354 // just exposed ourselves to funds loss to our counterparty.
6355 #[cfg(not(fuzzing))]
6356 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
6361 self.context.funding_tx_confirmation_height = height;
6362 self.context.funding_tx_confirmed_in = Some(*block_hash);
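// The short channel id encodes the funding output's on-chain location per BOLT 7: the block
// height, the funding tx's index within that block, and the funding output's index.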
6363 self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
6364 Ok(scid) => Some(scid),
6365 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
6368 // If this is a coinbase transaction and not a 0-conf channel
6369 // we should update our min_depth to 100 to handle coinbase maturity
6370 if tx.is_coin_base() &&
6371 self.context.minimum_depth.unwrap_or(0) > 0 &&
6372 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
6373 self.context.minimum_depth = Some(COINBASE_MATURITY);
6376 // If we allow 1-conf funding, we may need to check for channel_ready here and
6377 // send it immediately instead of waiting for a best_block_updated call (which
6378 // may have already happened for this block).
6379 if let Some(channel_ready) = self.check_get_channel_ready(height) {
6380 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
6381 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
6382 msgs = (Some(channel_ready), announcement_sigs);
6385 for inp in tx.input.iter() {
6386 if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
6387 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
6388 return Err(ClosureReason::CommitmentTxConfirmed);
6396 /// When a new block is connected, we check the height of the block against outbound holding
6397 /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
6398 /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
6399 /// handled by the ChannelMonitor.
6401 /// If we return Err, the channel may have been closed, at which point the standard
6402 /// requirements apply - no calls may be made except those explicitly stated to be allowed
6405 /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
6407 pub fn best_block_updated<NS: Deref, L: Deref>(
6408 &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
6409 node_signer: &NS, user_config: &UserConfig, logger: &L
6410 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
6412 NS::Target: NodeSigner,
6415 self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
6418 fn do_best_block_updated<NS: Deref, L: Deref>(
6419 &mut self, height: u32, highest_header_time: u32,
6420 chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
6421 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
6423 NS::Target: NodeSigner,
6426 let mut timed_out_htlcs = Vec::new();
6427 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
6428 // forward an HTLC when our counterparty should almost certainly just fail it for expiring
6430 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
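// Holding-cell HTLCs whose cltv_expiry falls within LATENCY_GRACE_PERIOD_BLOCKS of the current
// height are failed back below rather than forwarded.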
6431 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
6433 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
6434 if *cltv_expiry <= unforwarded_htlc_cltv_limit {
6435 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
6443 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
6445 if let Some(channel_ready) = self.check_get_channel_ready(height) {
6446 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
6447 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
6449 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
6450 return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
6453 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
6454 self.context.channel_state.is_our_channel_ready() {
6455 let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
6456 if self.context.funding_tx_confirmation_height == 0 {
6457 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
6458 // zero if it has been reorged out, however in either case, our state flags
6459 // indicate we've already sent a channel_ready
6460 funding_tx_confirmations = 0;
6463 // If we've sent channel_ready (or have both sent and received channel_ready), and
6464 // the funding transaction has become unconfirmed,
6465 // close the channel and hope we can get the latest state on chain (because presumably
6466 // the funding transaction is at least still in the mempool of most nodes).
6468 // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
6469 // 0-conf channel, but not doing so may lead to the
6470 // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
6472 if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
6473 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
6474 self.context.minimum_depth.unwrap(), funding_tx_confirmations);
6475 return Err(ClosureReason::ProcessingError { err: err_reason });
6477 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
6478 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
6479 log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
6480 // If funding_tx_confirmed_in is unset, the channel must not be active
6481 assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new()));
6482 assert!(!self.context.channel_state.is_our_channel_ready());
6483 return Err(ClosureReason::FundingTimedOut);
6486 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
6487 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
6489 Ok((None, timed_out_htlcs, announcement_sigs))
6492 /// Indicates the funding transaction is no longer confirmed in the main chain. This may
6493 /// force-close the channel, but may also indicate a harmless reorganization of a block or two
6494 /// before the channel has reached channel_ready and we can just wait for more blocks.
6495 pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
6496 if self.context.funding_tx_confirmation_height != 0 {
6497 // We handle the funding disconnection by calling best_block_updated with a height one
6498 // below where our funding was connected, implying a reorg back to conf_height - 1.
6499 let reorg_height = self.context.funding_tx_confirmation_height - 1;
6500 // We use the time field to bump the current time we set on channel updates if it's
6501 // larger. If we don't know that time has moved forward, we can just set it to the last
6502 // time we saw and it will be ignored.
6503 let best_time = self.context.update_time_counter;
6504 match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) {
6505 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
6506 assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
6507 assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
6508 assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
6514 // We never learned about the funding confirmation anyway, just ignore
6519 // Methods to get unprompted messages to send to the remote end (or where we already returned
6520 // something in the handler for the message that prompted this message):
6522 /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
6523 /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
6524 /// directions). Should be used for both broadcasted announcements and in response to an
6525 /// AnnouncementSignatures message from the remote peer.
6527 /// Will only fail if we're not in a state where channel_announcement may be sent (including
6530 /// This will only return ChannelError::Ignore upon failure.
6532 /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
6533 fn get_channel_announcement<NS: Deref>(
6534 &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
6535 ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
6536 if !self.context.config.announced_channel {
6537 return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
6539 if !self.context.is_usable() {
6540 return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
6543 let short_channel_id = self.context.get_short_channel_id()
6544 .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
6545 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
6546 .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
6547 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
6548 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
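// Per BOLT 7, node_id_1 is the lexicographically-lesser of the two node ids, so we're "node
// one" iff our node id sorts before our counterparty's.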
6550 let msg = msgs::UnsignedChannelAnnouncement {
6551 features: channelmanager::provided_channel_features(&user_config),
6554 node_id_1: if were_node_one { node_id } else { counterparty_node_id },
6555 node_id_2: if were_node_one { counterparty_node_id } else { node_id },
6556 bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
6557 bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
6558 excess_data: Vec::new(),
6564 fn get_announcement_sigs<NS: Deref, L: Deref>(
6565 &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
6566 best_block_height: u32, logger: &L
6567 ) -> Option<msgs::AnnouncementSignatures>
6569 NS::Target: NodeSigner,
6572 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
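// Fewer than six confirmations (conf_height + 5 > best height): too early to announce per BOLT 7.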
6576 if !self.context.is_usable() {
6580 if self.context.channel_state.is_peer_disconnected() {
6581 log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
6585 if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
6589 log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
6590 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
6593 log_trace!(logger, "{:?}", e);
6597 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
6599 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
6604 match &self.context.holder_signer {
6605 ChannelSignerType::Ecdsa(ecdsa) => {
6606 let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
6608 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
6613 let short_channel_id = match self.context.get_short_channel_id() {
6615 None => return None,
6618 self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
6620 Some(msgs::AnnouncementSignatures {
6621 channel_id: self.context.channel_id(),
6623 node_signature: our_node_sig,
6624 bitcoin_signature: our_bitcoin_sig,
6627 // TODO (taproot|arik)
6633 /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
6635 fn sign_channel_announcement<NS: Deref>(
6636 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
6637 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
6638 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
6639 let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
6640 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
6641 let were_node_one = announcement.node_id_1 == our_node_key;
6643 let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
6644 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
6645 match &self.context.holder_signer {
6646 ChannelSignerType::Ecdsa(ecdsa) => {
6647 let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
6648 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
6649 Ok(msgs::ChannelAnnouncement {
6650 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
6651 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
6652 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
6653 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
6654 contents: announcement,
6657 // TODO (taproot|arik)
6662 Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
6666 /// Processes an incoming announcement_signatures message, providing a fully-signed
6667 /// channel_announcement message which we can broadcast and storing our counterparty's
6668 /// signatures for later reconstruction/rebroadcast of the channel_announcement.
6669 pub fn announcement_signatures<NS: Deref>(
6670 &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
6671 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
6672 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
6673 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
6675 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
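// Per BOLT 7, announcement signatures sign the double-SHA256 of the serialized announcement
// contents, so that's the digest we verify the counterparty's signatures against.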
6677 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
6678 return Err(ChannelError::Close(format!(
6679 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
6680 &announcement, self.context.get_counterparty_node_id())));
6682 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
6683 return Err(ChannelError::Close(format!(
6684 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
6685 &announcement, self.context.counterparty_funding_pubkey())));
6688 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
6689 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
6690 return Err(ChannelError::Ignore(
6691 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
6694 self.sign_channel_announcement(node_signer, announcement)
6697 /// Gets a signed channel_announcement for this channel, if we previously received an
6698 /// announcement_signatures from our counterparty.
6699 pub fn get_signed_channel_announcement<NS: Deref>(
6700 &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
6701 ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
6702 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
6705 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
6707 Err(_) => return None,
6709 match self.sign_channel_announcement(node_signer, announcement) {
6710 Ok(res) => Some(res),
6715 /// May panic if called on a channel that wasn't immediately-previously
6716 /// self.remove_uncommitted_htlcs_and_mark_paused()'d
6717 pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
6718 assert!(self.context.channel_state.is_peer_disconnected());
6719 assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
6720 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
6721 // current to_remote balances. However, it no longer has any use, and thus is now simply
6722 // set to a dummy (but valid, as required by the spec) public key.
6723 // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
6724 // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
6725 // valid, and valid in fuzzing mode's arbitrary validity criteria:
6726 let mut pk = [2; 33]; pk[1] = 0xff;
6727 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
6728 let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
6729 let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
6730 log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
6733 log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
6736 self.mark_awaiting_response();
6737 msgs::ChannelReestablish {
6738 channel_id: self.context.channel_id(),
6739 // The protocol has two different commitment number concepts - the "commitment
6740 // transaction number", which starts from 0 and counts up, and the "revocation key
6741 // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
6742 // commitment transaction numbers by the index which will be used to reveal the
6743 // revocation key for that commitment transaction, which means we have to convert them
6744 // to protocol-level commitment numbers here...
6746 // next_local_commitment_number is the next commitment_signed number we expect to
6747 // receive (indicating if they need to resend one that we missed).
6748 next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
6749 // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
6750 // receive, however we track it by the next commitment number for a remote transaction
6751 // (which is one further, as they always revoke previous commitment transaction, not
6752 // the one we send) so we have to decrement by 1. Note that if
6753 // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
6754 // dropped this channel on disconnect as it hasn't yet reached AwaitingChannelReady so we can't
6756 next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
6757 your_last_per_commitment_secret: remote_last_secret,
6758 my_current_per_commitment_point: dummy_pubkey,
6759 // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
6760 // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
6761 // txid of that interactive transaction, else we MUST NOT set it.
6762 next_funding_txid: None,
6767 // Send stuff to our remote peers:
6769 /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
6770 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
6771 /// commitment update.
6773 /// `Err`s will only be [`ChannelError::Ignore`].
6774 pub fn queue_add_htlc<F: Deref, L: Deref>(
6775 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
6776 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
6777 blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
6778 ) -> Result<(), ChannelError>
6779 where F::Target: FeeEstimator, L::Target: Logger
6782 .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
6783 skimmed_fee_msat, blinding_point, fee_estimator, logger)
6784 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
6786 if let ChannelError::Ignore(_) = err { /* fine */ }
6787 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
6792 /// Adds a pending outbound HTLC to this channel. Note that you probably want
6793 /// [`Self::send_htlc_and_commit`] instead, because you'll want both messages at once.
6795 /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
6797 /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
6798 /// wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
6800 /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
6801 /// we may not yet have sent the previous commitment update messages and will need to
6802 /// regenerate them.
6804 /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
6805 /// on this [`Channel`] if `force_holding_cell` is false.
6807 /// `Err`s will only be [`ChannelError::Ignore`].
6808 fn send_htlc<F: Deref, L: Deref>(
6809 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
6810 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
6811 skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
6812 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
6813 ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
6814 where F::Target: FeeEstimator, L::Target: Logger
6816 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
6817 self.context.channel_state.is_local_shutdown_sent() ||
6818 self.context.channel_state.is_remote_shutdown_sent()
6820 return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
6822 let channel_total_msat = self.context.channel_value_satoshis * 1000;
6823 if amount_msat > channel_total_msat {
6824 return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
6827 if amount_msat == 0 {
6828 return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
6831 let available_balances = self.context.get_available_balances(fee_estimator);
6832 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
6833 return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
6834 available_balances.next_outbound_htlc_minimum_msat)));
6837 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
6838 return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
6839 available_balances.next_outbound_htlc_limit_msat)));
6842 if self.context.channel_state.is_peer_disconnected() {
6843 // Note that this should never really happen: if we're !is_live(), receipt of an
6844 // incoming HTLC for relay will result in us rejecting the HTLC, and we won't allow
6845 // the user to send directly into a !is_live() channel. However, if we
6846 // disconnected while the previous hop was doing the commitment dance we may
6847 // end up getting here after the forwarding delay. In any case, returning an
6848 // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
6849 return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
6852 let need_holding_cell = !self.context.channel_state.can_generate_new_commitment();
6853 log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
6854 payment_hash, amount_msat,
6855 if force_holding_cell { "into holding cell" }
6856 else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
6857 else { "to peer" });
6859 if need_holding_cell {
6860 force_holding_cell = true;
6863 // Now update local state:
6864 if force_holding_cell {
6865 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
6870 onion_routing_packet,
6877 self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
6878 htlc_id: self.context.next_holder_htlc_id,
6880 payment_hash: payment_hash.clone(),
6882 state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
6888 let res = msgs::UpdateAddHTLC {
6889 channel_id: self.context.channel_id,
6890 htlc_id: self.context.next_holder_htlc_id,
6894 onion_routing_packet,
6898 self.context.next_holder_htlc_id += 1;
6903 fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
6904 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
6905 // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
6906 // fail to generate this, we still are at least at a position where upgrading their status
6908 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
6909 let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
6910 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
6912 if let Some(state) = new_state {
6913 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
6917 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
6918 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
6919 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
6920 // Grab the preimage, if it exists, instead of cloning
6921 let mut reason = OutboundHTLCOutcome::Success(None);
6922 mem::swap(outcome, &mut reason);
6923 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
6926 if let Some((feerate, update_state)) = self.context.pending_update_fee {
6927 if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
6928 debug_assert!(!self.context.is_outbound());
6929 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
6930 self.context.feerate_per_kw = feerate;
6931 self.context.pending_update_fee = None;
6934 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
6936 let (mut htlcs_ref, counterparty_commitment_tx) =
6937 self.build_commitment_no_state_update(logger);
6938 let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
6939 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
6940 htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
6942 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
6943 self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
6946 self.context.latest_monitor_update_id += 1;
6947 let monitor_update = ChannelMonitorUpdate {
6948 update_id: self.context.latest_monitor_update_id,
6949 counterparty_node_id: Some(self.context.counterparty_node_id),
6950 updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
6951 commitment_txid: counterparty_commitment_txid,
6952 htlc_outputs: htlcs.clone(),
6953 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
6954 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
6955 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
6956 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
6957 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
6959 channel_id: Some(self.context.channel_id()),
6961 self.context.channel_state.set_awaiting_remote_revoke();
6965 fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
6966 -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
6967 where L::Target: Logger
6969 let counterparty_keys = self.context.build_remote_transaction_keys();
6970 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
6971 let counterparty_commitment_tx = commitment_stats.tx;
6973 #[cfg(any(test, fuzzing))]
6975 if !self.context.is_outbound() {
6976 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
6977 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
6978 if let Some(info) = projected_commit_tx_info {
6979 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
6980 if info.total_pending_htlcs == total_pending_htlcs
6981 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
6982 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
6983 && info.feerate == self.context.feerate_per_kw {
6984 let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
6985 assert_eq!(actual_fee, info.fee);
6991 (commitment_stats.htlcs_included, counterparty_commitment_tx)
6994 /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
6995 /// generation when we shouldn't change HTLC/channel state.
6996 fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
6997 // Get the fee tests from `build_commitment_no_state_update`
6998 #[cfg(any(test, fuzzing))]
6999 self.build_commitment_no_state_update(logger);
7001 let counterparty_keys = self.context.build_remote_transaction_keys();
7002 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
7003 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
7005 match &self.context.holder_signer {
7006 ChannelSignerType::Ecdsa(ecdsa) => {
7007 let (signature, htlc_signatures);
7010 let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
7011 for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
7015 let res = ecdsa.sign_counterparty_commitment(
7016 &commitment_stats.tx,
7017 commitment_stats.inbound_htlc_preimages,
7018 commitment_stats.outbound_htlc_preimages,
7019 &self.context.secp_ctx,
7020 ).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
7022 htlc_signatures = res.1;
7024 log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
7025 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
7026 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
7027 log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
7029 for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
7030 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
7031 encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
7032 encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
7033 log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
7034 log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
7038 Ok((msgs::CommitmentSigned {
7039 channel_id: self.context.channel_id,
7043 partial_signature_with_nonce: None,
7044 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
7046 // TODO (taproot|arik)
7052 /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
7053 /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
7055 /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
7056 /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
7057 pub fn send_htlc_and_commit<F: Deref, L: Deref>(
7058 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
7059 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
7060 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
7061 ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
7062 where F::Target: FeeEstimator, L::Target: Logger
7064 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
7065 onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
7066 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
7069 let monitor_update = self.build_commitment_no_status_check(logger);
7070 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
7071 Ok(self.push_ret_blockable_mon_update(monitor_update))
7077 /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
7079 pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
7080 let new_forwarding_info = Some(CounterpartyForwardingInfo {
7081 fee_base_msat: msg.contents.fee_base_msat,
7082 fee_proportional_millionths: msg.contents.fee_proportional_millionths,
7083 cltv_expiry_delta: msg.contents.cltv_expiry_delta
7085 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
7087 self.context.counterparty_forwarding_info = new_forwarding_info;
7093 /// Begins the shutdown process, getting a message for the remote peer and returning all
7094 /// holding cell HTLCs for payment failure.
7095 pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
7096 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
7097 -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
7099 for htlc in self.context.pending_outbound_htlcs.iter() {
7100 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
7101 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
7104 if self.context.channel_state.is_local_shutdown_sent() {
7105 return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
7107 else if self.context.channel_state.is_remote_shutdown_sent() {
7108 return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
7110 if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
7111 return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
7113 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
7114 if self.context.channel_state.is_peer_disconnected() || self.context.channel_state.is_monitor_update_in_progress() {
7115 return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
7118 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
7121 // use override shutdown script if provided
7122 let shutdown_scriptpubkey = match override_shutdown_script {
7123 Some(script) => script,
7125 // otherwise, use the shutdown scriptpubkey provided by the signer
7126 match signer_provider.get_shutdown_scriptpubkey() {
7127 Ok(scriptpubkey) => scriptpubkey,
7128 Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
7132 if !shutdown_scriptpubkey.is_compatible(their_features) {
7133 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
7135 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
7140 // From here on out, we may not fail!
7141 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
7142 self.context.channel_state.set_local_shutdown_sent();
7143 self.context.local_initiated_shutdown = Some(());
7144 self.context.update_time_counter += 1;
7146 let monitor_update = if update_shutdown_script {
7147 self.context.latest_monitor_update_id += 1;
7148 let monitor_update = ChannelMonitorUpdate {
7149 update_id: self.context.latest_monitor_update_id,
7150 counterparty_node_id: Some(self.context.counterparty_node_id),
7151 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
7152 scriptpubkey: self.get_closing_scriptpubkey(),
7154 channel_id: Some(self.context.channel_id()),
7156 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
7157 self.push_ret_blockable_mon_update(monitor_update)
7159 let shutdown = msgs::Shutdown {
7160 channel_id: self.context.channel_id,
7161 scriptpubkey: self.get_closing_scriptpubkey(),
7164 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
7165 // our shutdown until we've committed all of the pending changes.
7166 self.context.holding_cell_update_fee = None;
7167 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
7168 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
7170 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
7171 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
7178 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
7179 "we can't both complete shutdown and return a monitor update");
7181 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
7184 pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
7185 self.context.holding_cell_htlc_updates.iter()
7186 .flat_map(|htlc_update| {
7188 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
7189 => Some((source, payment_hash)),
7193 .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
7197 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
7198 pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
7199 pub context: ChannelContext<SP>,
7200 pub unfunded_context: UnfundedChannelContext,
7203 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
7204 pub fn new<ES: Deref, F: Deref>(
7205 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
7206 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
7207 outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
7208 ) -> Result<OutboundV1Channel<SP>, APIError>
7209 where ES::Target: EntropySource,
7210 F::Target: FeeEstimator
7212 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
7213 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
7214 // Protocol level safety check in place, although it should never happen because
7215 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
7216 return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below \
7217 implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
7220 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
7221 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
7222 let pubkeys = holder_signer.pubkeys().clone();
7225 context: ChannelContext::new_for_outbound_channel(
7229 counterparty_node_id,
7231 channel_value_satoshis,
7235 current_chain_height,
7236 outbound_scid_alias,
7237 temporary_channel_id,
7238 holder_selected_channel_reserve_satoshis,
7243 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
7248 /// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
7249 fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
7250 let counterparty_keys = self.context.build_remote_transaction_keys();
7251 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
7252 let signature = match &self.context.holder_signer {
7253 // TODO (taproot|arik): move match into calling method for Taproot
7254 ChannelSignerType::Ecdsa(ecdsa) => {
7255 ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
7256 .map(|(sig, _)| sig).ok()?
7258 // TODO (taproot|arik)
7263 if self.context.signer_pending_funding {
7264 log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
7265 self.context.signer_pending_funding = false;
7268 Some(msgs::FundingCreated {
7269 temporary_channel_id: self.context.temporary_channel_id.unwrap(),
7270 funding_txid: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
7271 funding_output_index: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
7274 partial_signature_with_nonce: None,
7276 next_local_nonce: None,
7280 /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
7281 /// a funding_created message for the remote peer.
7282 /// Panics if called at some time other than immediately after initial handshake, if called twice,
7283 /// or if called on an inbound channel.
7284 /// Note that channel_id changes during this call!
7285 /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
7286 /// If an Err is returned, it is a ChannelError::Close.
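/// A rough usage sketch (the `channel`, `funding_tx`, `peer_handle`, and `logger` names here are
/// illustrative, not part of this API):
/// ```ignore
/// let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
/// match channel.get_funding_created(funding_tx.clone(), funding_txo, false, &logger) {
///     Ok(Some(funding_created)) => peer_handle.send_funding_created(funding_created),
///     Ok(None) => { /* signer is async; the message will be produced later */ },
///     Err((_chan, e)) => { /* ChannelError::Close - abandon the channel */ },
/// }
/// // Only broadcast `funding_tx` once the peer's funding_signed has been handled.
/// ```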
7287 pub fn get_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
7288 -> Result<Option<msgs::FundingCreated>, (Self, ChannelError)> where L::Target: Logger {
7289 if !self.context.is_outbound() {
7290 panic!("Tried to create outbound funding_created message on an inbound channel!");
7293 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7294 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7296 panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
7298 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7299 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7300 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7301 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7304 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
7305 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
7307 // Now that we're past error-generating stuff, update our local state:
7309 self.context.channel_state = ChannelState::FundingNegotiated;
7310 self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
7312 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
7313 // We can skip this if it is a zero-conf channel.
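// (Coinbase outputs are consensus-locked for 100 blocks, which is what `COINBASE_MATURITY` encodes here.)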
7314 if funding_transaction.is_coin_base() &&
7315 self.context.minimum_depth.unwrap_or(0) > 0 &&
7316 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
7317 self.context.minimum_depth = Some(COINBASE_MATURITY);
7320 self.context.funding_transaction = Some(funding_transaction);
7321 self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
7323 let funding_created = self.get_funding_created_msg(logger);
7324 if funding_created.is_none() {
7325 #[cfg(not(async_signing))] {
7326 panic!("Failed to get signature for new funding creation");
7328 #[cfg(async_signing)] {
7329 if !self.context.signer_pending_funding {
7330 log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
7331 self.context.signer_pending_funding = true;
7339 /// If we receive an error message, it may only be a rejection of the channel type we tried,
7340 /// not of our ability to open any channel at all. Thus, on error, we should first call this
7341 /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
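///
/// A minimal sketch of the retry flow when a peer `error` arrives for an unfunded outbound
/// channel (the `peer_handle` and `fail_channel` names are illustrative):
/// ```ignore
/// match channel.maybe_handle_error_without_close(chain_hash, &fee_estimator) {
///     // Retry with downgraded (e.g. non-anchor) channel features.
///     Ok(open_channel) => peer_handle.send_open_channel(open_channel),
///     // Nothing left to downgrade; the channel must be failed.
///     Err(()) => fail_channel(channel),
/// }
/// ```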
7342 pub(crate) fn maybe_handle_error_without_close<F: Deref>(
7343 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
7344 ) -> Result<msgs::OpenChannel, ()>
7346 F::Target: FeeEstimator
7348 self.context.maybe_downgrade_channel_features(fee_estimator)?;
7349 Ok(self.get_open_channel(chain_hash))
7352 pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
7353 if !self.context.is_outbound() {
7354 panic!("Tried to open a channel for an inbound channel?");
7356 if self.context.have_received_message() {
7357 panic!("Cannot generate an open_channel after we've moved forward");
7360 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7361 panic!("Tried to send an open_channel for a channel that has already advanced");
7364 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
7365 let keys = self.context.get_holder_pubkeys();
7368 common_fields: msgs::CommonOpenChannelFields {
7370 temporary_channel_id: self.context.channel_id,
7371 funding_satoshis: self.context.channel_value_satoshis,
7372 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
7373 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
7374 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
7375 commitment_feerate_sat_per_1000_weight: self.context.feerate_per_kw as u32,
7376 to_self_delay: self.context.get_holder_selected_contest_delay(),
7377 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
7378 funding_pubkey: keys.funding_pubkey,
7379 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
7380 payment_basepoint: keys.payment_point,
7381 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
7382 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
7383 first_per_commitment_point,
7384 channel_flags: if self.context.config.announced_channel {1} else {0},
7385 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
7386 Some(script) => script.clone().into_inner(),
7387 None => Builder::new().into_script(),
7389 channel_type: Some(self.context.channel_type.clone()),
7391 push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
7392 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
7397 pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
7398 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
7400 // Check sanity of message fields:
7401 if !self.context.is_outbound() {
7402 return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
7404 if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
7405 return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
7407 if msg.common_fields.dust_limit_satoshis > 21000000 * 100000000 {
7408 return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.common_fields.dust_limit_satoshis)));
7410 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
7411 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
7413 if msg.common_fields.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
7414 return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.common_fields.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
7416 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
7417 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
7418 msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
7420 let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
7421 if msg.common_fields.htlc_minimum_msat >= full_channel_value_msat {
7422 return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.common_fields.htlc_minimum_msat, full_channel_value_msat)));
7424 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
7425 if msg.common_fields.to_self_delay > max_delay_acceptable {
7426 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.common_fields.to_self_delay)));
7428 if msg.common_fields.max_accepted_htlcs < 1 {
7429 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
7431 if msg.common_fields.max_accepted_htlcs > MAX_HTLCS {
7432 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.common_fields.max_accepted_htlcs, MAX_HTLCS)));
7435 // Now check against optional parameters as set by config...
7436 if msg.common_fields.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
7437 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.common_fields.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
7439 if msg.common_fields.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
7440 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.common_fields.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
7442 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
7443 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
7445 if msg.common_fields.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
7446 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.common_fields.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
7448 if msg.common_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
7449 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
7451 if msg.common_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
7452 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
7454 if msg.common_fields.minimum_depth > peer_limits.max_minimum_depth {
7455 return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.common_fields.minimum_depth)));
7458 if let Some(ty) = &msg.common_fields.channel_type {
7459 if *ty != self.context.channel_type {
7460 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
7462 } else if their_features.supports_channel_type() {
7463 // Assume they've accepted the channel type as they said they understand it.
7465 let channel_type = ChannelTypeFeatures::from_init(&their_features);
7466 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
7467 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
7469 self.context.channel_type = channel_type.clone();
7470 self.context.channel_transaction_parameters.channel_type_features = channel_type;
7473 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
7474 match &msg.common_fields.shutdown_scriptpubkey {
7475 &Some(ref script) => {
7476 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
7477 if script.len() == 0 {
7480 if !script::is_bolt2_compliant(&script, their_features) {
7481 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
7483 Some(script.clone())
7486 // Peer is signaling upfront_shutdown but didn't opt out with the correct mechanism (a.k.a. a 0-length script). The peer looks buggy, so we fail the channel
7488 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
7493 self.context.counterparty_dust_limit_satoshis = msg.common_fields.dust_limit_satoshis;
7494 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.common_fields.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
7495 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
7496 self.context.counterparty_htlc_minimum_msat = msg.common_fields.htlc_minimum_msat;
7497 self.context.counterparty_max_accepted_htlcs = msg.common_fields.max_accepted_htlcs;
7499 if peer_limits.trust_own_funding_0conf {
7500 self.context.minimum_depth = Some(msg.common_fields.minimum_depth);
7502 self.context.minimum_depth = Some(cmp::max(1, msg.common_fields.minimum_depth));
7505 let counterparty_pubkeys = ChannelPublicKeys {
7506 funding_pubkey: msg.common_fields.funding_pubkey,
7507 revocation_basepoint: RevocationBasepoint::from(msg.common_fields.revocation_basepoint),
7508 payment_point: msg.common_fields.payment_basepoint,
7509 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.common_fields.delayed_payment_basepoint),
7510 htlc_basepoint: HtlcBasepoint::from(msg.common_fields.htlc_basepoint)
7513 self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
7514 selected_contest_delay: msg.common_fields.to_self_delay,
7515 pubkeys: counterparty_pubkeys,
7518 self.context.counterparty_cur_commitment_point = Some(msg.common_fields.first_per_commitment_point);
7519 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
7521 self.context.channel_state = ChannelState::NegotiatingFunding(
7522 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
7524 self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
7529 /// Handles a funding_signed message from the remote end.
7530 /// If this call is successful, broadcast the funding transaction (and not before!)
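///
/// A rough sketch of the caller-side ordering (the `chain_monitor`, `broadcaster`, and error
/// handling names are illustrative):
/// ```ignore
/// match outbound_channel.funding_signed(&msg, best_block, &signer_provider, &logger) {
///     Ok((channel, channel_monitor)) => {
///         // Persist and start watching the monitor first; only then is broadcasting the
///         // funding transaction safe.
///         chain_monitor.watch_channel(channel.context.get_funding_txo().unwrap(), channel_monitor);
///         broadcaster.broadcast_transactions(&[&funding_tx]);
///     },
///     Err((channel, e)) => { /* channel is returned so it can be failed/cleaned up */ },
/// }
/// ```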
7531 pub fn funding_signed<L: Deref>(
7532 mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
7533 ) -> Result<(Channel<SP>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (OutboundV1Channel<SP>, ChannelError)>
7537 if !self.context.is_outbound() {
7538 return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
7540 if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
7541 return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
7543 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7544 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7545 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7546 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7549 let funding_script = self.context.get_funding_redeemscript();
7551 let counterparty_keys = self.context.build_remote_transaction_keys();
7552 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
7553 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
7554 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
7556 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
7557 &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
7559 let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
7560 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
7562 let trusted_tx = initial_commitment_tx.trust();
7563 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
7564 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
7565 // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
7566 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
7567 return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));
7571 let holder_commitment_tx = HolderCommitmentTransaction::new(
7572 initial_commitment_tx,
7575 &self.context.get_holder_pubkeys().funding_pubkey,
7576 self.context.counterparty_funding_pubkey()
7580 let validated = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
7581 if validated.is_err() {
7582 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
7585 let funding_redeemscript = self.context.get_funding_redeemscript();
7586 let funding_txo = self.context.get_funding_txo().unwrap();
7587 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
7588 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
7589 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
7590 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
7591 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
7592 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
7593 shutdown_script, self.context.get_holder_selected_contest_delay(),
7594 &self.context.destination_script, (funding_txo, funding_txo_script),
7595 &self.context.channel_transaction_parameters,
7596 funding_redeemscript.clone(), self.context.channel_value_satoshis,
7598 holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id());
7599 channel_monitor.provide_initial_counterparty_commitment_tx(
7600 counterparty_initial_bitcoin_tx.txid, Vec::new(),
7601 self.context.cur_counterparty_commitment_transaction_number,
7602 self.context.counterparty_cur_commitment_point.unwrap(),
7603 counterparty_initial_commitment_tx.feerate_per_kw(),
7604 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7605 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
7607 assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have not had any monitor(s) yet to fail an update!
7608 if self.context.is_batch_funding() {
7609 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
7611 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
7613 self.context.cur_holder_commitment_transaction_number -= 1;
7614 self.context.cur_counterparty_commitment_transaction_number -= 1;
7616 log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
7618 let mut channel = Channel {
7619 context: self.context,
7620 #[cfg(dual_funding)]
7621 dual_funding_channel_context: None,
7624 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
7625 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7626 Ok((channel, channel_monitor))
7629 /// Indicates that the signer may have some signatures for us, so we should retry if we're stuck.
7631 #[cfg(async_signing)]
7632 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
7633 if self.context.signer_pending_funding && self.context.is_outbound() {
7634 log_trace!(logger, "Signer unblocked a funding_created");
7635 self.get_funding_created_msg(logger)
7640 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
7641 pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
7642 pub context: ChannelContext<SP>,
7643 pub unfunded_context: UnfundedChannelContext,
7646 /// Fetches the [`ChannelTypeFeatures`] that will be used for a channel built from a given
7647 /// [`msgs::CommonOpenChannelFields`].
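///
/// Returns a [`ChannelError::Close`] if the requested type contains optional bits, does not
/// require `static_remote_key`, is not a subset of the features we support, or asks for SCID
/// privacy on an announced channel. A hedged call sketch (the message and feature-set bindings
/// are assumed to come from the caller):
/// ```ignore
/// let channel_type = channel_type_from_open_channel(
///     &open_channel_msg.common_fields, &their_init_features, &our_supported_features)?;
/// ```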
7648 pub(super) fn channel_type_from_open_channel(
7649 common_fields: &msgs::CommonOpenChannelFields, their_features: &InitFeatures,
7650 our_supported_features: &ChannelTypeFeatures
7651 ) -> Result<ChannelTypeFeatures, ChannelError> {
7652 if let Some(channel_type) = &common_fields.channel_type {
7653 if channel_type.supports_any_optional_bits() {
7654 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
7657 // We only support the channel types defined by the `ChannelManager` in
7658 // `provided_channel_type_features`. The channel type must always support
7659 // `static_remote_key`.
7660 if !channel_type.requires_static_remote_key() {
7661 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
7663 // Make sure we support all of the features behind the channel type.
7664 if !channel_type.is_subset(our_supported_features) {
7665 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
7667 let announced_channel = (common_fields.channel_flags & 1) == 1;
7668 if channel_type.requires_scid_privacy() && announced_channel {
7669 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
7671 Ok(channel_type.clone())
7673 let channel_type = ChannelTypeFeatures::from_init(&their_features);
7674 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
7675 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
7681 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
7682 /// Creates a new channel from a remote side's request for one.
7683 /// Assumes chain_hash has already been checked and corresponds with what we expect!
7684 pub fn new<ES: Deref, F: Deref, L: Deref>(
7685 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
7686 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
7687 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
7688 current_chain_height: u32, logger: &L, is_0conf: bool,
7689 ) -> Result<InboundV1Channel<SP>, ChannelError>
7690 where ES::Target: EntropySource,
7691 F::Target: FeeEstimator,
7694 let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.common_fields.temporary_channel_id));
7696 // First check the channel type is known, failing before we do anything else if we don't
7697 // support this channel type.
7698 let channel_type = channel_type_from_open_channel(&msg.common_fields, their_features, our_supported_features)?;
7700 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.common_fields.funding_satoshis, config);
7701 let counterparty_pubkeys = ChannelPublicKeys {
7702 funding_pubkey: msg.common_fields.funding_pubkey,
7703 revocation_basepoint: RevocationBasepoint::from(msg.common_fields.revocation_basepoint),
7704 payment_point: msg.common_fields.payment_basepoint,
7705 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.common_fields.delayed_payment_basepoint),
7706 htlc_basepoint: HtlcBasepoint::from(msg.common_fields.htlc_basepoint)
7710 context: ChannelContext::new_for_inbound_channel(
7714 counterparty_node_id,
7718 current_chain_height,
7723 counterparty_pubkeys,
7725 holder_selected_channel_reserve_satoshis,
7726 msg.channel_reserve_satoshis,
7728 msg.common_fields.clone(),
7730 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
7735 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
7736 /// should be sent back to the counterparty node.
7738 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
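///
/// A minimal sketch of the accept path (the `peer_handle` name is illustrative):
/// ```ignore
/// // Typically driven by the decision to accept an inbound open_channel request:
/// let accept_channel_msg = inbound_channel.accept_inbound_channel();
/// peer_handle.send_accept_channel(accept_channel_msg);
/// ```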
7739 pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
7740 if self.context.is_outbound() {
7741 panic!("Tried to send accept_channel for an outbound channel?");
7744 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7745 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7747 panic!("Tried to send accept_channel after channel had moved forward");
7749 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7750 panic!("Tried to send an accept_channel for a channel that has already advanced");
7753 self.generate_accept_channel_message()
7756 /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
7757 /// inbound channel. If the intention is to accept an inbound channel, use
7758 /// [`InboundV1Channel::accept_inbound_channel`] instead.
7760 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7761 fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
7762 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
7763 let keys = self.context.get_holder_pubkeys();
7765 msgs::AcceptChannel {
7766 common_fields: msgs::CommonAcceptChannelFields {
7767 temporary_channel_id: self.context.channel_id,
7768 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
7769 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
7770 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
7771 minimum_depth: self.context.minimum_depth.unwrap(),
7772 to_self_delay: self.context.get_holder_selected_contest_delay(),
7773 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
7774 funding_pubkey: keys.funding_pubkey,
7775 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
7776 payment_basepoint: keys.payment_point,
7777 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
7778 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
7779 first_per_commitment_point,
7780 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
7781 Some(script) => script.clone().into_inner(),
7782 None => Builder::new().into_script(),
7784 channel_type: Some(self.context.channel_type.clone()),
7786 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
7788 next_local_nonce: None,
7792 /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
7793 /// inbound channel without accepting it.
7795 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7797 pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
7798 self.generate_accept_channel_message()
7801 fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
7802 let funding_script = self.context.get_funding_redeemscript();
7804 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
7805 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
7806 let trusted_tx = initial_commitment_tx.trust();
7807 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
7808 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
7809 // They sign the holder commitment transaction...
7810 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
7811 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
7812 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
7813 encode::serialize_hex(&funding_script), &self.context.channel_id());
7814 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
7816 Ok(initial_commitment_tx)
7819 pub fn funding_created<L: Deref>(
7820 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
7821 ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
7825 if self.context.is_outbound() {
7826 return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
7829 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7830 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7832 // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
7833 // remember the channel, so it's safe to just send an error_message here and drop the channel.
7835 return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
7837 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7838 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7839 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7840 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7843 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
7844 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
7845 // This is an externally observable change before we finish all our checks. In particular
7846 // check_funding_created_signature may fail.
7847 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
7849 let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
7851 Err(ChannelError::Close(e)) => {
7852 self.context.channel_transaction_parameters.funding_outpoint = None;
7853 return Err((self, ChannelError::Close(e)));
7856 // The only error we know how to handle is ChannelError::Close, so we fall over here
7857 // to make sure we don't continue with an inconsistent state.
7858 panic!("unexpected error type from check_funding_created_signature {:?}", e);
7862 let holder_commitment_tx = HolderCommitmentTransaction::new(
7863 initial_commitment_tx,
7866 &self.context.get_holder_pubkeys().funding_pubkey,
7867 self.context.counterparty_funding_pubkey()
7870 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
7871 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
7874 // Now that we're past error-generating stuff, update our local state:
7876 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
7877 self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
7878 self.context.cur_counterparty_commitment_transaction_number -= 1;
7879 self.context.cur_holder_commitment_transaction_number -= 1;
7881 let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
7883 let funding_redeemscript = self.context.get_funding_redeemscript();
7884 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
7885 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
7886 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
7887 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
7888 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
7889 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
7890 shutdown_script, self.context.get_holder_selected_contest_delay(),
7891 &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
7892 &self.context.channel_transaction_parameters,
7893 funding_redeemscript.clone(), self.context.channel_value_satoshis,
7895 holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id());
7896 channel_monitor.provide_initial_counterparty_commitment_tx(
7897 counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
7898 self.context.cur_counterparty_commitment_transaction_number + 1,
7899 self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
7900 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7901 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
7903 log_info!(logger, "{} funding_signed for peer for channel {}",
7904 if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
7906 // Promote the channel to a full-fledged one now that we have updated the state and have a
7907 // `ChannelMonitor`.
7908 let mut channel = Channel {
7909 context: self.context,
7910 #[cfg(dual_funding)]
7911 dual_funding_channel_context: None,
7913 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
7914 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7916 Ok((channel, funding_signed, channel_monitor))
7920 // A not-yet-funded outbound (from holder) channel using V2 channel establishment.
7921 #[cfg(dual_funding)]
7922 pub(super) struct OutboundV2Channel<SP: Deref> where SP::Target: SignerProvider {
7923 pub context: ChannelContext<SP>,
7924 pub unfunded_context: UnfundedChannelContext,
7925 #[cfg(dual_funding)]
7926 pub dual_funding_context: DualFundingChannelContext,
7929 #[cfg(dual_funding)]
7930 impl<SP: Deref> OutboundV2Channel<SP> where SP::Target: SignerProvider {
7931 pub fn new<ES: Deref, F: Deref>(
7932 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
7933 counterparty_node_id: PublicKey, their_features: &InitFeatures, funding_satoshis: u64,
7934 user_id: u128, config: &UserConfig, current_chain_height: u32, outbound_scid_alias: u64,
7935 funding_confirmation_target: ConfirmationTarget,
7936 ) -> Result<OutboundV2Channel<SP>, APIError>
7937 where ES::Target: EntropySource,
7938 F::Target: FeeEstimator,
7940 let channel_keys_id = signer_provider.generate_channel_keys_id(false, funding_satoshis, user_id);
7941 let holder_signer = signer_provider.derive_channel_signer(funding_satoshis, channel_keys_id);
7942 let pubkeys = holder_signer.pubkeys().clone();
7944 let temporary_channel_id = Some(ChannelId::temporary_v2_from_revocation_basepoint(&pubkeys.revocation_basepoint));
7946 let holder_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis(
7947 funding_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
7949 let funding_feerate_sat_per_1000_weight = fee_estimator.bounded_sat_per_1000_weight(funding_confirmation_target);
7950 let funding_tx_locktime = current_chain_height;
7953 context: ChannelContext::new_for_outbound_channel(
7957 counterparty_node_id,
7963 current_chain_height,
7964 outbound_scid_alias,
7965 temporary_channel_id,
7966 holder_selected_channel_reserve_satoshis,
7971 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 },
7972 dual_funding_context: DualFundingChannelContext {
7973 our_funding_satoshis: funding_satoshis,
7974 their_funding_satoshis: 0,
7975 funding_tx_locktime,
7976 funding_feerate_sat_per_1000_weight,
7982 /// If we receive an error message, it may only be a rejection of the channel type we tried,
7983 /// not of our ability to open any channel at all. Thus, on error, we should first call this
7984 /// and see if we get a new `OpenChannelV2` message, otherwise the channel is failed.
7985 pub(crate) fn maybe_handle_error_without_close<F: Deref>(
7986 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
7987 ) -> Result<msgs::OpenChannelV2, ()>
7989 F::Target: FeeEstimator
7991 self.context.maybe_downgrade_channel_features(fee_estimator)?;
7992 Ok(self.get_open_channel_v2(chain_hash))
7995 pub fn get_open_channel_v2(&self, chain_hash: ChainHash) -> msgs::OpenChannelV2 {
7996 if self.context.have_received_message() {
7997 debug_assert!(false, "Cannot generate an open_channel2 after we've moved forward");
8000 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
8001 debug_assert!(false, "Tried to send an open_channel2 for a channel that has already advanced");
8004 let first_per_commitment_point = self.context.holder_signer.as_ref()
8005 .get_per_commitment_point(self.context.cur_holder_commitment_transaction_number,
8006 &self.context.secp_ctx);
8007 let second_per_commitment_point = self.context.holder_signer.as_ref()
8008 .get_per_commitment_point(self.context.cur_holder_commitment_transaction_number - 1,
8009 &self.context.secp_ctx);
8010 let keys = self.context.get_holder_pubkeys();
8012 msgs::OpenChannelV2 {
8013 common_fields: msgs::CommonOpenChannelFields {
8015 temporary_channel_id: self.context.temporary_channel_id.unwrap(),
8016 funding_satoshis: self.context.channel_value_satoshis,
8017 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
8018 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
8019 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
8020 commitment_feerate_sat_per_1000_weight: self.context.feerate_per_kw,
8021 to_self_delay: self.context.get_holder_selected_contest_delay(),
8022 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
8023 funding_pubkey: keys.funding_pubkey,
8024 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
8025 payment_basepoint: keys.payment_point,
8026 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
8027 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
8028 first_per_commitment_point,
8029 channel_flags: if self.context.config.announced_channel {1} else {0},
8030 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
8031 Some(script) => script.clone().into_inner(),
8032 None => Builder::new().into_script(),
8034 channel_type: Some(self.context.channel_type.clone()),
8036 funding_feerate_sat_per_1000_weight: self.context.feerate_per_kw,
8037 second_per_commitment_point,
8038 locktime: self.dual_funding_context.funding_tx_locktime,
8039 require_confirmed_inputs: None,
8044 // A not-yet-funded inbound (from counterparty) channel using V2 channel establishment.
8045 #[cfg(dual_funding)]
8046 pub(super) struct InboundV2Channel<SP: Deref> where SP::Target: SignerProvider {
8047 pub context: ChannelContext<SP>,
8048 pub unfunded_context: UnfundedChannelContext,
8049 pub dual_funding_context: DualFundingChannelContext,
8052 #[cfg(dual_funding)]
8053 impl<SP: Deref> InboundV2Channel<SP> where SP::Target: SignerProvider {
8054 /// Creates a new dual-funded channel from a remote side's request for one.
8055 /// Assumes chain_hash has already been checked and corresponds with what we expect!
8056 pub fn new<ES: Deref, F: Deref, L: Deref>(
8057 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
8058 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
8059 their_features: &InitFeatures, msg: &msgs::OpenChannelV2, funding_satoshis: u64, user_id: u128,
8060 config: &UserConfig, current_chain_height: u32, logger: &L,
8061 ) -> Result<InboundV2Channel<SP>, ChannelError>
8062 where ES::Target: EntropySource,
8063 F::Target: FeeEstimator,
8066 let channel_value_satoshis = funding_satoshis.saturating_add(msg.common_fields.funding_satoshis);
8067 let counterparty_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis(
8068 channel_value_satoshis, msg.common_fields.dust_limit_satoshis);
8069 let holder_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis(
8070 channel_value_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
8072 // First check the channel type is known, failing before we do anything else if we don't
8073 // support this channel type.
8074 if msg.common_fields.channel_type.is_none() {
8075 return Err(ChannelError::Close(format!("Rejecting V2 channel {} missing channel_type",
8076 msg.common_fields.temporary_channel_id)))
8078 let channel_type = channel_type_from_open_channel(&msg.common_fields, their_features, our_supported_features)?;
8080 let counterparty_pubkeys = ChannelPublicKeys {
8081 funding_pubkey: msg.common_fields.funding_pubkey,
8082 revocation_basepoint: RevocationBasepoint(msg.common_fields.revocation_basepoint),
8083 payment_point: msg.common_fields.payment_basepoint,
8084 delayed_payment_basepoint: DelayedPaymentBasepoint(msg.common_fields.delayed_payment_basepoint),
8085 htlc_basepoint: HtlcBasepoint(msg.common_fields.htlc_basepoint)
8088 let mut context = ChannelContext::new_for_inbound_channel(
8092 counterparty_node_id,
8096 current_chain_height,
8102 counterparty_pubkeys,
8104 holder_selected_channel_reserve_satoshis,
8105 counterparty_selected_channel_reserve_satoshis,
8106 0 /* push_msat not used in dual-funding */,
8107 msg.common_fields.clone(),
8109 let channel_id = ChannelId::v2_from_revocation_basepoints(
8110 &context.get_holder_pubkeys().revocation_basepoint,
8111 &context.get_counterparty_pubkeys().revocation_basepoint);
8112 context.channel_id = channel_id;
8116 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 },
8117 dual_funding_context: DualFundingChannelContext {
8118 our_funding_satoshis: funding_satoshis,
8119 their_funding_satoshis: msg.common_fields.funding_satoshis,
8120 funding_tx_locktime: msg.locktime,
8121 funding_feerate_sat_per_1000_weight: msg.funding_feerate_sat_per_1000_weight,
8128 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannelV2`] message which
8129 /// should be sent back to the counterparty node.
8131 /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2
8132 pub fn accept_inbound_dual_funded_channel(&mut self) -> msgs::AcceptChannelV2 {
8133 if self.context.is_outbound() {
8134 debug_assert!(false, "Tried to send accept_channel for an outbound channel?");
8137 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
8138 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
8140 debug_assert!(false, "Tried to send accept_channel2 after channel had moved forward");
8142 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
8143 debug_assert!(false, "Tried to send an accept_channel2 for a channel that has already advanced");
8146 self.generate_accept_channel_v2_message()
8149 /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
8150 /// inbound channel. If the intention is to accept an inbound channel, use
8151 /// [`InboundV1Channel::accept_inbound_channel`] instead.
8153 /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2
8154 fn generate_accept_channel_v2_message(&self) -> msgs::AcceptChannelV2 {
8155 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(
8156 self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
8157 let second_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(
8158 self.context.cur_holder_commitment_transaction_number - 1, &self.context.secp_ctx);
8159 let keys = self.context.get_holder_pubkeys();
8161 msgs::AcceptChannelV2 {
8162 common_fields: msgs::CommonAcceptChannelFields {
8163 temporary_channel_id: self.context.temporary_channel_id.unwrap(),
8164 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
8165 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
8166 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
8167 minimum_depth: self.context.minimum_depth.unwrap(),
8168 to_self_delay: self.context.get_holder_selected_contest_delay(),
8169 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
8170 funding_pubkey: keys.funding_pubkey,
8171 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
8172 payment_basepoint: keys.payment_point,
8173 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
8174 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
8175 first_per_commitment_point,
8176 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
8177 Some(script) => script.clone().into_inner(),
8178 None => Builder::new().into_script(),
8180 channel_type: Some(self.context.channel_type.clone()),
8182 funding_satoshis: self.dual_funding_context.our_funding_satoshis,
8183 second_per_commitment_point,
8184 require_confirmed_inputs: None,
8188 /// Enables the possibility for tests to extract a [`msgs::AcceptChannelV2`] message for an
8189 /// inbound channel without accepting it.
8191 /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2
8193 pub fn get_accept_channel_v2_message(&self) -> msgs::AcceptChannelV2 {
8194 self.generate_accept_channel_v2_message()
8198 // Unfunded channel utilities
8200 fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
8201 // The default channel type (ie the first one we try) depends on whether the channel is
8202 // public - if it is, we just go with `only_static_remotekey` as it's the only option
8203 // available. If it's private, we first try `scid_privacy` as it provides better privacy
8204 // with no other changes, and fall back to `only_static_remotekey`.
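// For example (illustrative only): a private channel to a peer that advertises both SCID-alias
// and anchor support, with both options enabled in our config, is first attempted as
// `static_remote_key | scid_privacy | anchors_zero_fee_htlc_tx`.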
8205 let mut ret = ChannelTypeFeatures::only_static_remote_key();
8206 if !config.channel_handshake_config.announced_channel &&
8207 config.channel_handshake_config.negotiate_scid_privacy &&
8208 their_features.supports_scid_privacy() {
8209 ret.set_scid_privacy_required();
8212 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
8213 // set it now. If they don't understand it, we'll fall back to our default of
8214 // `only_static_remotekey`.
8215 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
8216 their_features.supports_anchors_zero_fee_htlc_tx() {
8217 ret.set_anchors_zero_fee_htlc_tx_required();
8223 const SERIALIZATION_VERSION: u8 = 3;
8224 const MIN_SERIALIZATION_VERSION: u8 = 3;
8226 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
8232 impl Writeable for ChannelUpdateStatus {
8233 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
8234 // We only care about writing out the current state as it was announced, ie only either
8235 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
8236 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
8238 ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
8239 ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
8240 ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
8241 ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
8247 impl Readable for ChannelUpdateStatus {
8248 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
8249 Ok(match <u8 as Readable>::read(reader)? {
8250 0 => ChannelUpdateStatus::Enabled,
8251 1 => ChannelUpdateStatus::Disabled,
8252 _ => return Err(DecodeError::InvalidValue),
8257 impl Writeable for AnnouncementSigsState {
8258 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
8259 // We only care about writing out the current state as if we had just disconnected, at
8260 // which point we always set anything but PeerReceived to NotSent.
8262 AnnouncementSigsState::NotSent => 0u8.write(writer),
8263 AnnouncementSigsState::MessageSent => 0u8.write(writer),
8264 AnnouncementSigsState::Committed => 0u8.write(writer),
8265 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
8270 impl Readable for AnnouncementSigsState {
8271 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
8272 Ok(match <u8 as Readable>::read(reader)? {
8273 0 => AnnouncementSigsState::NotSent,
8274 1 => AnnouncementSigsState::PeerReceived,
8275 _ => return Err(DecodeError::InvalidValue),
8280 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
8281 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
8282 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
8285 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
8287 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
8288 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
8289 // the low bytes now and the optional high bytes later.
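// For example (illustrative values): a user_id of 0x0102030405060708090a0b0c0d0e0f10_u128 is
// written here as the low half 0x090a0b0c0d0e0f10, with the high half 0x0102030405060708
// following later as an optional TLV.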
8290 let user_id_low = self.context.user_id as u64;
8291 user_id_low.write(writer)?;
8293 // Version 1 deserializers expected to read parts of the config object here. Version 2
8294 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
8295 // `minimum_depth` we simply write dummy values here.
8296 writer.write_all(&[0; 8])?;
8298 self.context.channel_id.write(writer)?;
8300 let mut channel_state = self.context.channel_state;
8301 if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
8302 channel_state.set_peer_disconnected();
8304 debug_assert!(false, "Pre-funded/shutdown channels should not be written");
8306 channel_state.to_u32().write(writer)?;
8308 self.context.channel_value_satoshis.write(writer)?;
8310 self.context.latest_monitor_update_id.write(writer)?;
8312 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
8313 // deserialized from that format.
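// (When the script has no legacy-encodable pubkey we write 33 zero bytes as a placeholder.)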
8314 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
8315 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
8316 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
8318 self.context.destination_script.write(writer)?;
8320 self.context.cur_holder_commitment_transaction_number.write(writer)?;
8321 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
8322 self.context.value_to_self_msat.write(writer)?;
8324 let mut dropped_inbound_htlcs = 0;
8325 for htlc in self.context.pending_inbound_htlcs.iter() {
8326 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
8327 dropped_inbound_htlcs += 1;
8330 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
8331 for htlc in self.context.pending_inbound_htlcs.iter() {
8332 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
8335 htlc.htlc_id.write(writer)?;
8336 htlc.amount_msat.write(writer)?;
8337 htlc.cltv_expiry.write(writer)?;
8338 htlc.payment_hash.write(writer)?;
8340 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
8341 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
8343 htlc_state.write(writer)?;
8345 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
8347 htlc_state.write(writer)?;
8349 &InboundHTLCState::Committed => {
8352 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
8354 removal_reason.write(writer)?;
8359 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
8360 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
8361 let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
8363 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
8364 for htlc in self.context.pending_outbound_htlcs.iter() {
8365 htlc.htlc_id.write(writer)?;
8366 htlc.amount_msat.write(writer)?;
8367 htlc.cltv_expiry.write(writer)?;
8368 htlc.payment_hash.write(writer)?;
8369 htlc.source.write(writer)?;
8371 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
8373 onion_packet.write(writer)?;
8375 &OutboundHTLCState::Committed => {
8378 &OutboundHTLCState::RemoteRemoved(_) => {
8379 // Treat this as a Committed because we haven't received the CS - they'll
8380 // resend the claim/fail on reconnect as well as (hopefully) the missing CS.
8383 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
8385 if let OutboundHTLCOutcome::Success(preimage) = outcome {
8386 preimages.push(preimage);
8388 let reason: Option<&HTLCFailReason> = outcome.into();
8389 reason.write(writer)?;
8391 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
8393 if let OutboundHTLCOutcome::Success(preimage) = outcome {
8394 preimages.push(preimage);
8396 let reason: Option<&HTLCFailReason> = outcome.into();
8397 reason.write(writer)?;
8400 pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat);
8401 pending_outbound_blinding_points.push(htlc.blinding_point);
8404 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
8405 let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
8406 // Vec of (htlc_id, failure_code, sha256_of_onion)
8407 let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new();
8408 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
8409 for update in self.context.holding_cell_htlc_updates.iter() {
8411 &HTLCUpdateAwaitingACK::AddHTLC {
8412 ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
8413 blinding_point, skimmed_fee_msat,
8416 amount_msat.write(writer)?;
8417 cltv_expiry.write(writer)?;
8418 payment_hash.write(writer)?;
8419 source.write(writer)?;
8420 onion_routing_packet.write(writer)?;
8422 holding_cell_skimmed_fees.push(skimmed_fee_msat);
8423 holding_cell_blinding_points.push(blinding_point);
8425 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
8427 payment_preimage.write(writer)?;
8428 htlc_id.write(writer)?;
8430 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
8432 htlc_id.write(writer)?;
8433 err_packet.write(writer)?;
8435 &HTLCUpdateAwaitingACK::FailMalformedHTLC {
8436 htlc_id, failure_code, sha256_of_onion
8438 // We don't want to break downgrading by adding a new variant, so write a dummy
8439 // `::FailHTLC` variant and write the real malformed error as an optional TLV.
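// Older readers will thus deserialize this entry as a `FailHTLC` carrying an empty error
// packet, while readers aware of the TLV can recover the original malformed failure.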
8440 malformed_htlcs.push((htlc_id, failure_code, sha256_of_onion));
8442 let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
8444 htlc_id.write(writer)?;
8445 dummy_err_packet.write(writer)?;
8450 match self.context.resend_order {
8451 RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
8452 RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
8455 self.context.monitor_pending_channel_ready.write(writer)?;
8456 self.context.monitor_pending_revoke_and_ack.write(writer)?;
8457 self.context.monitor_pending_commitment_signed.write(writer)?;
8459 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
8460 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
8461 pending_forward.write(writer)?;
8462 htlc_id.write(writer)?;
8465 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
8466 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
8467 htlc_source.write(writer)?;
8468 payment_hash.write(writer)?;
8469 fail_reason.write(writer)?;
8472 if self.context.is_outbound() {
8473 self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
8474 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
8475 Some(feerate).write(writer)?;
8477 // As for inbound HTLCs, if the update was only announced and never committed in a
8478 // commitment_signed, drop it.
8479 None::<u32>.write(writer)?;
8481 self.context.holding_cell_update_fee.write(writer)?;
8483 self.context.next_holder_htlc_id.write(writer)?;
8484 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
8485 self.context.update_time_counter.write(writer)?;
8486 self.context.feerate_per_kw.write(writer)?;
8488 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
8489 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
8490 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
8491 // consider the stale state on reload.
8494 self.context.funding_tx_confirmed_in.write(writer)?;
8495 self.context.funding_tx_confirmation_height.write(writer)?;
8496 self.context.short_channel_id.write(writer)?;
8498 self.context.counterparty_dust_limit_satoshis.write(writer)?;
8499 self.context.holder_dust_limit_satoshis.write(writer)?;
8500 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
8502 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
8503 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
8505 self.context.counterparty_htlc_minimum_msat.write(writer)?;
8506 self.context.holder_htlc_minimum_msat.write(writer)?;
8507 self.context.counterparty_max_accepted_htlcs.write(writer)?;
8509 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
8510 self.context.minimum_depth.unwrap_or(0).write(writer)?;
8512 match &self.context.counterparty_forwarding_info {
8515 info.fee_base_msat.write(writer)?;
8516 info.fee_proportional_millionths.write(writer)?;
8517 info.cltv_expiry_delta.write(writer)?;
8519 None => 0u8.write(writer)?
8522 self.context.channel_transaction_parameters.write(writer)?;
8523 self.context.funding_transaction.write(writer)?;
8525 self.context.counterparty_cur_commitment_point.write(writer)?;
8526 self.context.counterparty_prev_commitment_point.write(writer)?;
8527 self.context.counterparty_node_id.write(writer)?;
8529 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
8531 self.context.commitment_secrets.write(writer)?;
8533 self.context.channel_update_status.write(writer)?;
8535 #[cfg(any(test, fuzzing))]
8536 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
8537 #[cfg(any(test, fuzzing))]
8538 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
8539 htlc.write(writer)?;
8542 // If the channel type is something other than only-static-remote-key, then we need to have
8543 // older clients fail to deserialize this channel at all. If the type is
8544 // only-static-remote-key, we simply consider it "default" and don't write the channel type at all.
8546 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
8547 Some(&self.context.channel_type) } else { None };
8549 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
8550 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to a percentage
8551 // of the channel value other than the 10% which older versions of LDK used before the
8552 // percentage was made configurable.
8553 let serialized_holder_selected_reserve =
8554 if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
8555 { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
8557 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
8558 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
8559 let serialized_holder_htlc_max_in_flight =
8560 if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
8561 { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
8563 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
8564 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
8566 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
8567 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
8568 // we write the high bytes as an option here.
8569 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
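// For example, a `user_id` of `(7u128 << 64) | 42` is written as a low `u64` of 42 in its legacy
// fixed position plus `user_id_high_opt = Some(7)` here; the read side rebuilds it as
// `low as u128 + ((high as u128) << 64)`.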
8571 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
8573 write_tlv_fields!(writer, {
8574 (0, self.context.announcement_sigs, option),
8575 // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
8576 // default value instead of being Option<>al. Thus, to maintain compatibility we write
8577 // them twice, once with their original default values above, and once as an option
8578 // here. On the read side, old versions will simply ignore the odd-type entries here,
8579 // and new versions map the default values to None and allow the TLV entries here to override them.
8581 (1, self.context.minimum_depth, option),
8582 (2, chan_type, option),
8583 (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
8584 (4, serialized_holder_selected_reserve, option),
8585 (5, self.context.config, required),
8586 (6, serialized_holder_htlc_max_in_flight, option),
8587 (7, self.context.shutdown_scriptpubkey, option),
8588 (8, self.context.blocked_monitor_updates, optional_vec),
8589 (9, self.context.target_closing_feerate_sats_per_kw, option),
8590 (11, self.context.monitor_pending_finalized_fulfills, required_vec),
8591 (13, self.context.channel_creation_height, required),
8592 (15, preimages, required_vec),
8593 (17, self.context.announcement_sigs_state, required),
8594 (19, self.context.latest_inbound_scid_alias, option),
8595 (21, self.context.outbound_scid_alias, required),
8596 (23, channel_ready_event_emitted, option),
8597 (25, user_id_high_opt, option),
8598 (27, self.context.channel_keys_id, required),
8599 (28, holder_max_accepted_htlcs, option),
8600 (29, self.context.temporary_channel_id, option),
8601 (31, channel_pending_event_emitted, option),
8602 (35, pending_outbound_skimmed_fees, optional_vec),
8603 (37, holding_cell_skimmed_fees, optional_vec),
8604 (38, self.context.is_batch_funding, option),
8605 (39, pending_outbound_blinding_points, optional_vec),
8606 (41, holding_cell_blinding_points, optional_vec),
8607 (43, malformed_htlcs, optional_vec), // Added in 0.0.119
8608 (45, self.context.local_initiated_shutdown, option), // Added in 0.0.122
8615 const MAX_ALLOC_SIZE: usize = 64*1024;
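// Reading a serialized channel back (see the `ReadableArgs` impl below) takes the entropy source,
// the signer provider, a block height (used as a fallback `channel_creation_height` for old
// serializations), and our supported channel type features. Illustrative sketch only, assuming
// `chan`, `entropy_source`, `signer_provider`, `height` and `features` are in scope:
//   let bytes = chan.encode();
//   let chan = Channel::read(&mut &bytes[..], (&entropy_source, &signer_provider, height, &features))?;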
8616 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
8618 ES::Target: EntropySource,
8619 SP::Target: SignerProvider
8621 fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
8622 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
8623 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
8625 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
8626 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
8627 // the low bytes now and the high bytes later.
8628 let user_id_low: u64 = Readable::read(reader)?;
8630 let mut config = Some(LegacyChannelConfig::default());
8632 // Read the old serialization of the ChannelConfig from version 0.0.98.
8633 config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
8634 config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
8635 config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
8636 config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
8638 // Read the 8 bytes of backwards-compatibility ChannelConfig data.
8639 let mut _val: u64 = Readable::read(reader)?;
8642 let channel_id = Readable::read(reader)?;
8643 let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?;
8644 let channel_value_satoshis = Readable::read(reader)?;
8646 let latest_monitor_update_id = Readable::read(reader)?;
8648 let mut keys_data = None;
8650 // Read the serialized signer bytes. We'll choose to deserialize them or not based on whether
8651 // the `channel_keys_id` TLV is present below.
8652 let keys_len: u32 = Readable::read(reader)?;
8653 keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
8654 while keys_data.as_ref().unwrap().len() != keys_len as usize {
8655 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
8656 let mut data = [0; 1024];
8657 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
8658 reader.read_exact(read_slice)?;
8659 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
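// Note: capping the initial capacity at MAX_ALLOC_SIZE and reading in 1KB chunks means a bogus
// `keys_len` in corrupted data produces a read error rather than a huge up-front allocation.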
8663 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
8664 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
8665 Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
8668 let destination_script = Readable::read(reader)?;
8670 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
8671 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
8672 let value_to_self_msat = Readable::read(reader)?;
8674 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
8676 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
8677 for _ in 0..pending_inbound_htlc_count {
8678 pending_inbound_htlcs.push(InboundHTLCOutput {
8679 htlc_id: Readable::read(reader)?,
8680 amount_msat: Readable::read(reader)?,
8681 cltv_expiry: Readable::read(reader)?,
8682 payment_hash: Readable::read(reader)?,
8683 state: match <u8 as Readable>::read(reader)? {
8684 1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
8685 2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
8686 3 => InboundHTLCState::Committed,
8687 4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
8688 _ => return Err(DecodeError::InvalidValue),
8693 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
8694 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
8695 for _ in 0..pending_outbound_htlc_count {
8696 pending_outbound_htlcs.push(OutboundHTLCOutput {
8697 htlc_id: Readable::read(reader)?,
8698 amount_msat: Readable::read(reader)?,
8699 cltv_expiry: Readable::read(reader)?,
8700 payment_hash: Readable::read(reader)?,
8701 source: Readable::read(reader)?,
8702 state: match <u8 as Readable>::read(reader)? {
8703 0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
8704 1 => OutboundHTLCState::Committed,
8706 let option: Option<HTLCFailReason> = Readable::read(reader)?;
8707 OutboundHTLCState::RemoteRemoved(option.into())
8710 let option: Option<HTLCFailReason> = Readable::read(reader)?;
8711 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
8714 let option: Option<HTLCFailReason> = Readable::read(reader)?;
8715 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
8717 _ => return Err(DecodeError::InvalidValue),
8719 skimmed_fee_msat: None,
8720 blinding_point: None,
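// `skimmed_fee_msat` and `blinding_point` default to `None` here; if present they are restored
// from the TLVs at types 35 and 39 below, and `Success(None)` outcomes get their preimages from
// the TLV at type 15.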
8724 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
8725 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
8726 for _ in 0..holding_cell_htlc_update_count {
8727 holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
8728 0 => HTLCUpdateAwaitingACK::AddHTLC {
8729 amount_msat: Readable::read(reader)?,
8730 cltv_expiry: Readable::read(reader)?,
8731 payment_hash: Readable::read(reader)?,
8732 source: Readable::read(reader)?,
8733 onion_routing_packet: Readable::read(reader)?,
8734 skimmed_fee_msat: None,
8735 blinding_point: None,
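// As above, these are restored from the holding-cell TLVs at types 37 and 41 below.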
8737 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
8738 payment_preimage: Readable::read(reader)?,
8739 htlc_id: Readable::read(reader)?,
8741 2 => HTLCUpdateAwaitingACK::FailHTLC {
8742 htlc_id: Readable::read(reader)?,
8743 err_packet: Readable::read(reader)?,
8745 _ => return Err(DecodeError::InvalidValue),
8749 let resend_order = match <u8 as Readable>::read(reader)? {
8750 0 => RAACommitmentOrder::CommitmentFirst,
8751 1 => RAACommitmentOrder::RevokeAndACKFirst,
8752 _ => return Err(DecodeError::InvalidValue),
8755 let monitor_pending_channel_ready = Readable::read(reader)?;
8756 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
8757 let monitor_pending_commitment_signed = Readable::read(reader)?;
8759 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
8760 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
8761 for _ in 0..monitor_pending_forwards_count {
8762 monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
8765 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
8766 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
8767 for _ in 0..monitor_pending_failures_count {
8768 monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
8771 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
8773 let holding_cell_update_fee = Readable::read(reader)?;
8775 let next_holder_htlc_id = Readable::read(reader)?;
8776 let next_counterparty_htlc_id = Readable::read(reader)?;
8777 let update_time_counter = Readable::read(reader)?;
8778 let feerate_per_kw = Readable::read(reader)?;
8780 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
8781 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
8782 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
8783 // consider the stale state on reload.
8784 match <u8 as Readable>::read(reader)? {
8787 let _: u32 = Readable::read(reader)?;
8788 let _: u64 = Readable::read(reader)?;
8789 let _: Signature = Readable::read(reader)?;
8791 _ => return Err(DecodeError::InvalidValue),
8794 let funding_tx_confirmed_in = Readable::read(reader)?;
8795 let funding_tx_confirmation_height = Readable::read(reader)?;
8796 let short_channel_id = Readable::read(reader)?;
8798 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
8799 let holder_dust_limit_satoshis = Readable::read(reader)?;
8800 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
8801 let mut counterparty_selected_channel_reserve_satoshis = None;
8803 // Read the old serialization from version 0.0.98.
8804 counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
8806 // Read the 8 bytes of backwards-compatibility data.
8807 let _dummy: u64 = Readable::read(reader)?;
8809 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
8810 let holder_htlc_minimum_msat = Readable::read(reader)?;
8811 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
8813 let mut minimum_depth = None;
8815 // Read the old serialization from version 0.0.98.
8816 minimum_depth = Some(Readable::read(reader)?);
8818 // Read the 4 bytes of backwards-compatibility data.
8819 let _dummy: u32 = Readable::read(reader)?;
8822 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
8824 1 => Some(CounterpartyForwardingInfo {
8825 fee_base_msat: Readable::read(reader)?,
8826 fee_proportional_millionths: Readable::read(reader)?,
8827 cltv_expiry_delta: Readable::read(reader)?,
8829 _ => return Err(DecodeError::InvalidValue),
8832 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
8833 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
8835 let counterparty_cur_commitment_point = Readable::read(reader)?;
8837 let counterparty_prev_commitment_point = Readable::read(reader)?;
8838 let counterparty_node_id = Readable::read(reader)?;
8840 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
8841 let commitment_secrets = Readable::read(reader)?;
8843 let channel_update_status = Readable::read(reader)?;
8845 #[cfg(any(test, fuzzing))]
8846 let mut historical_inbound_htlc_fulfills = new_hash_set();
8847 #[cfg(any(test, fuzzing))]
8849 let htlc_fulfills_len: u64 = Readable::read(reader)?;
8850 for _ in 0..htlc_fulfills_len {
8851 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
8855 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
8856 Some((feerate, if channel_parameters.is_outbound_from_holder {
8857 FeeUpdateState::Outbound
8859 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
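// (Only the feerate was persisted; the state is re-derived here: for outbound channels any pending
// update is ours and resumes as `Outbound`, while counterparty updates that were written resume as
// `AwaitingRemoteRevokeToAnnounce`.)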
8865 let mut announcement_sigs = None;
8866 let mut target_closing_feerate_sats_per_kw = None;
8867 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
8868 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
8869 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
8870 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
8871 // only, so we default to that if none was written.
8872 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
8873 let mut channel_creation_height = Some(serialized_height);
8874 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
8876 // If we read an old Channel, for simplicity we just treat it as "we never sent an
8877 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
8878 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
8879 let mut latest_inbound_scid_alias = None;
8880 let mut outbound_scid_alias = None;
8881 let mut channel_pending_event_emitted = None;
8882 let mut channel_ready_event_emitted = None;
8884 let mut user_id_high_opt: Option<u64> = None;
8885 let mut channel_keys_id: Option<[u8; 32]> = None;
8886 let mut temporary_channel_id: Option<ChannelId> = None;
8887 let mut holder_max_accepted_htlcs: Option<u16> = None;
8889 let mut blocked_monitor_updates = Some(Vec::new());
8891 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
8892 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
8894 let mut is_batch_funding: Option<()> = None;
8896 let mut local_initiated_shutdown: Option<()> = None;
8898 let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
8899 let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
8901 let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
8903 read_tlv_fields!(reader, {
8904 (0, announcement_sigs, option),
8905 (1, minimum_depth, option),
8906 (2, channel_type, option),
8907 (3, counterparty_selected_channel_reserve_satoshis, option),
8908 (4, holder_selected_channel_reserve_satoshis, option),
8909 (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
8910 (6, holder_max_htlc_value_in_flight_msat, option),
8911 (7, shutdown_scriptpubkey, option),
8912 (8, blocked_monitor_updates, optional_vec),
8913 (9, target_closing_feerate_sats_per_kw, option),
8914 (11, monitor_pending_finalized_fulfills, optional_vec),
8915 (13, channel_creation_height, option),
8916 (15, preimages_opt, optional_vec),
8917 (17, announcement_sigs_state, option),
8918 (19, latest_inbound_scid_alias, option),
8919 (21, outbound_scid_alias, option),
8920 (23, channel_ready_event_emitted, option),
8921 (25, user_id_high_opt, option),
8922 (27, channel_keys_id, option),
8923 (28, holder_max_accepted_htlcs, option),
8924 (29, temporary_channel_id, option),
8925 (31, channel_pending_event_emitted, option),
8926 (35, pending_outbound_skimmed_fees_opt, optional_vec),
8927 (37, holding_cell_skimmed_fees_opt, optional_vec),
8928 (38, is_batch_funding, option),
8929 (39, pending_outbound_blinding_points_opt, optional_vec),
8930 (41, holding_cell_blinding_points_opt, optional_vec),
8931 (43, malformed_htlcs, optional_vec), // Added in 0.0.119
8932 (45, local_initiated_shutdown, option),
8935 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
8936 let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
8937 // If we've gotten to the funding stage of the channel, populate the signer with its
8938 // required channel parameters.
8939 if channel_state >= ChannelState::FundingNegotiated {
8940 holder_signer.provide_channel_parameters(&channel_parameters);
8942 (channel_keys_id, holder_signer)
8944 // `keys_data` can be `None` if we had corrupted data.
8945 let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
8946 let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
8947 (holder_signer.channel_keys_id(), holder_signer)
8950 if let Some(preimages) = preimages_opt {
8951 let mut iter = preimages.into_iter();
8952 for htlc in pending_outbound_htlcs.iter_mut() {
8954 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
8955 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
8957 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
8958 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
8963 // We expect all preimages to be consumed above
8964 if iter.next().is_some() {
8965 return Err(DecodeError::InvalidValue);
8969 let chan_features = channel_type.as_ref().unwrap();
8970 if !chan_features.is_subset(our_supported_features) {
8971 // If the channel was written by a new version and negotiated with features we don't
8972 // understand yet, refuse to read it.
8973 return Err(DecodeError::UnknownRequiredFeature);
8976 // ChannelTransactionParameters may have had an empty features set upon deserialization.
8977 // To account for that, we're proactively setting/overriding the field here.
8978 channel_parameters.channel_type_features = chan_features.clone();
8980 let mut secp_ctx = Secp256k1::new();
8981 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
8983 // `user_id` used to be a single u64 value. In order to remain backwards
8984 // compatible with versions prior to 0.0.113, the u128 is serialized as two
8985 // separate u64 values.
8986 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
8988 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
8990 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
8991 let mut iter = skimmed_fees.into_iter();
8992 for htlc in pending_outbound_htlcs.iter_mut() {
8993 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8995 // We expect all skimmed fees to be consumed above
8996 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8998 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
8999 let mut iter = skimmed_fees.into_iter();
9000 for htlc in holding_cell_htlc_updates.iter_mut() {
9001 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
9002 *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
9005 // We expect all skimmed fees to be consumed above
9006 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
9008 if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
9009 let mut iter = blinding_pts.into_iter();
9010 for htlc in pending_outbound_htlcs.iter_mut() {
9011 htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
9013 // We expect all blinding points to be consumed above
9014 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
9016 if let Some(blinding_pts) = holding_cell_blinding_points_opt {
9017 let mut iter = blinding_pts.into_iter();
9018 for htlc in holding_cell_htlc_updates.iter_mut() {
9019 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
9020 *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
9023 // We expect all blinding points to be consumed above
9024 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
9027 if let Some(malformed_htlcs) = malformed_htlcs {
9028 for (malformed_htlc_id, failure_code, sha256_of_onion) in malformed_htlcs {
9029 let htlc_idx = holding_cell_htlc_updates.iter().position(|htlc| {
9030 if let HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet } = htlc {
9031 let matches = *htlc_id == malformed_htlc_id;
9032 if matches { debug_assert!(err_packet.data.is_empty()) }
9035 }).ok_or(DecodeError::InvalidValue)?;
9036 let malformed_htlc = HTLCUpdateAwaitingACK::FailMalformedHTLC {
9037 htlc_id: malformed_htlc_id, failure_code, sha256_of_onion
9039 let _ = core::mem::replace(&mut holding_cell_htlc_updates[htlc_idx], malformed_htlc);
9044 context: ChannelContext {
9047 config: config.unwrap(),
9051 // Note that we don't care about serializing handshake limits as we only ever serialize
9052 // channel data after the handshake has completed.
9053 inbound_handshake_limits_override: None,
9056 temporary_channel_id,
9058 announcement_sigs_state: announcement_sigs_state.unwrap(),
9060 channel_value_satoshis,
9062 latest_monitor_update_id,
9064 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
9065 shutdown_scriptpubkey,
9068 cur_holder_commitment_transaction_number,
9069 cur_counterparty_commitment_transaction_number,
9072 holder_max_accepted_htlcs,
9073 pending_inbound_htlcs,
9074 pending_outbound_htlcs,
9075 holding_cell_htlc_updates,
9079 monitor_pending_channel_ready,
9080 monitor_pending_revoke_and_ack,
9081 monitor_pending_commitment_signed,
9082 monitor_pending_forwards,
9083 monitor_pending_failures,
9084 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
9086 signer_pending_commitment_update: false,
9087 signer_pending_funding: false,
9090 holding_cell_update_fee,
9091 next_holder_htlc_id,
9092 next_counterparty_htlc_id,
9093 update_time_counter,
9096 #[cfg(debug_assertions)]
9097 holder_max_commitment_tx_output: Mutex::new((0, 0)),
9098 #[cfg(debug_assertions)]
9099 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
9101 last_sent_closing_fee: None,
9102 pending_counterparty_closing_signed: None,
9103 expecting_peer_commitment_signed: false,
9104 closing_fee_limits: None,
9105 target_closing_feerate_sats_per_kw,
9107 funding_tx_confirmed_in,
9108 funding_tx_confirmation_height,
9110 channel_creation_height: channel_creation_height.unwrap(),
9112 counterparty_dust_limit_satoshis,
9113 holder_dust_limit_satoshis,
9114 counterparty_max_htlc_value_in_flight_msat,
9115 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
9116 counterparty_selected_channel_reserve_satoshis,
9117 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
9118 counterparty_htlc_minimum_msat,
9119 holder_htlc_minimum_msat,
9120 counterparty_max_accepted_htlcs,
9123 counterparty_forwarding_info,
9125 channel_transaction_parameters: channel_parameters,
9126 funding_transaction,
9129 counterparty_cur_commitment_point,
9130 counterparty_prev_commitment_point,
9131 counterparty_node_id,
9133 counterparty_shutdown_scriptpubkey,
9137 channel_update_status,
9138 closing_signed_in_flight: false,
9142 #[cfg(any(test, fuzzing))]
9143 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
9144 #[cfg(any(test, fuzzing))]
9145 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
9147 workaround_lnd_bug_4006: None,
9148 sent_message_awaiting_response: None,
9150 latest_inbound_scid_alias,
9151 // Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if they're missing
9152 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
9154 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
9155 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
9157 #[cfg(any(test, fuzzing))]
9158 historical_inbound_htlc_fulfills,
9160 channel_type: channel_type.unwrap(),
9163 local_initiated_shutdown,
9165 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
9167 #[cfg(dual_funding)]
9168 dual_funding_channel_context: None,
9176 use bitcoin::blockdata::constants::ChainHash;
9177 use bitcoin::blockdata::script::{ScriptBuf, Builder};
9178 use bitcoin::blockdata::transaction::{Transaction, TxOut};
9179 use bitcoin::blockdata::opcodes;
9180 use bitcoin::network::constants::Network;
9181 use crate::ln::onion_utils::INVALID_ONION_BLINDING;
9182 use crate::ln::{PaymentHash, PaymentPreimage};
9183 use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
9184 use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
9185 use crate::ln::channel::InitFeatures;
9186 use crate::ln::channel::{AwaitingChannelReadyFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
9187 use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
9188 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
9189 use crate::ln::msgs;
9190 use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
9191 use crate::ln::script::ShutdownScript;
9192 use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
9193 use crate::chain::BestBlock;
9194 use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
9195 use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
9196 use crate::chain::transaction::OutPoint;
9197 use crate::routing::router::{Path, RouteHop};
9198 use crate::util::config::UserConfig;
9199 use crate::util::errors::APIError;
9200 use crate::util::ser::{ReadableArgs, Writeable};
9201 use crate::util::test_utils;
9202 use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
9203 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
9204 use bitcoin::secp256k1::ffi::Signature as FFISignature;
9205 use bitcoin::secp256k1::{SecretKey,PublicKey};
9206 use bitcoin::hashes::sha256::Hash as Sha256;
9207 use bitcoin::hashes::Hash;
9208 use bitcoin::hashes::hex::FromHex;
9209 use bitcoin::hash_types::WPubkeyHash;
9210 use bitcoin::blockdata::locktime::absolute::LockTime;
9211 use bitcoin::address::{WitnessProgram, WitnessVersion};
9212 use crate::prelude::*;
9215 fn test_channel_state_order() {
9216 use crate::ln::channel::NegotiatingFundingFlags;
9217 use crate::ln::channel::AwaitingChannelReadyFlags;
9218 use crate::ln::channel::ChannelReadyFlags;
9220 assert!(ChannelState::NegotiatingFunding(NegotiatingFundingFlags::new()) < ChannelState::FundingNegotiated);
9221 assert!(ChannelState::FundingNegotiated < ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()));
9222 assert!(ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()) < ChannelState::ChannelReady(ChannelReadyFlags::new()));
9223 assert!(ChannelState::ChannelReady(ChannelReadyFlags::new()) < ChannelState::ShutdownComplete);
9226 struct TestFeeEstimator {
9229 impl FeeEstimator for TestFeeEstimator {
9230 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
9236 fn test_max_funding_satoshis_no_wumbo() {
9237 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
9238 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
9239 "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
9243 signer: InMemorySigner,
9246 impl EntropySource for Keys {
9247 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
9250 impl SignerProvider for Keys {
9251 type EcdsaSigner = InMemorySigner;
9253 type TaprootSigner = InMemorySigner;
9255 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
9256 self.signer.channel_keys_id()
9259 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
9263 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
9265 fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
9266 let secp_ctx = Secp256k1::signing_only();
9267 let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
9268 let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
9269 Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
9272 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
9273 let secp_ctx = Secp256k1::signing_only();
9274 let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
9275 Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
9279 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
9280 fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
9281 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
9285 fn upfront_shutdown_script_incompatibility() {
9286 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
9287 let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
9288 &WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
9291 let seed = [42; 32];
9292 let network = Network::Testnet;
9293 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9294 keys_provider.expect(OnGetShutdownScriptpubkey {
9295 returns: non_v0_segwit_shutdown_script.clone(),
9298 let secp_ctx = Secp256k1::new();
9299 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9300 let config = UserConfig::default();
9301 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
9302 Err(APIError::IncompatibleShutdownScript { script }) => {
9303 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
9305 Err(e) => panic!("Unexpected error: {:?}", e),
9306 Ok(_) => panic!("Expected error"),
9310 // Check that, during channel creation, we use the same feerate in the open channel message
9311 // as we do in the Channel object creation itself.
9313 fn test_open_channel_msg_fee() {
9314 let original_fee = 253;
9315 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
9316 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
9317 let secp_ctx = Secp256k1::new();
9318 let seed = [42; 32];
9319 let network = Network::Testnet;
9320 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9322 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9323 let config = UserConfig::default();
9324 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9326 // Now change the fee so we can check that the fee in the open_channel message is the
9327 // same as the old fee.
9328 fee_est.fee_est = 500;
9329 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9330 assert_eq!(open_channel_msg.common_fields.commitment_feerate_sat_per_1000_weight, original_fee);
9334 fn test_holder_vs_counterparty_dust_limit() {
9335 // Test that when calculating the local and remote commitment transaction fees, the correct
9336 // dust limits are used.
9337 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9338 let secp_ctx = Secp256k1::new();
9339 let seed = [42; 32];
9340 let network = Network::Testnet;
9341 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9342 let logger = test_utils::TestLogger::new();
9343 let best_block = BestBlock::from_network(network);
9345 // Go through the flow of opening a channel between two nodes, making sure
9346 // they have different dust limits.
9348 // Create Node A's channel pointing to Node B's pubkey
9349 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9350 let config = UserConfig::default();
9351 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9353 // Create Node B's channel by receiving Node A's open_channel message
9354 // Make sure A's dust limit is as we expect.
9355 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9356 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9357 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
9359 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
9360 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
9361 accept_channel_msg.common_fields.dust_limit_satoshis = 546;
9362 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
9363 node_a_chan.context.holder_dust_limit_satoshis = 1560;
9365 // Node A --> Node B: funding created
9366 let output_script = node_a_chan.context.get_funding_redeemscript();
9367 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
9368 value: 10000000, script_pubkey: output_script.clone(),
9370 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9371 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
9372 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
9374 // Node B --> Node A: funding signed
9375 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
9376 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
9378 // Put some inbound and outbound HTLCs in A's channel.
9379 let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
9380 node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
9382 amount_msat: htlc_amount_msat,
9383 payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
9384 cltv_expiry: 300000000,
9385 state: InboundHTLCState::Committed,
9388 node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
9390 amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
9391 payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
9392 cltv_expiry: 200000000,
9393 state: OutboundHTLCState::Committed,
9394 source: HTLCSource::OutboundRoute {
9395 path: Path { hops: Vec::new(), blinded_tail: None },
9396 session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
9397 first_hop_htlc_msat: 548,
9398 payment_id: PaymentId([42; 32]),
9400 skimmed_fee_msat: None,
9401 blinding_point: None,
9404 // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
9405 // the dust limit check.
9406 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
9407 let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
9408 let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
9409 assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
9411 // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
9412 // of the HTLCs are seen to be above the dust limit.
9413 node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
9414 let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
9415 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
9416 let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
9417 assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
9421 fn test_timeout_vs_success_htlc_dust_limit() {
9422 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
9423 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
9424 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
9425 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
9426 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
9427 let secp_ctx = Secp256k1::new();
9428 let seed = [42; 32];
9429 let network = Network::Testnet;
9430 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9432 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9433 let config = UserConfig::default();
9434 let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9436 let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
9437 let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
9439 // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
9440 // counted as dust when it shouldn't be.
9441 let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
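// i.e. 1 sat (in msat) above the holder's effective dust threshold for an offered HTLC: the
// holder dust limit plus the HTLC-timeout transaction fee at 253 sat/kW.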
9442 let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
9443 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
9444 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
9446 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
9447 let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
9448 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
9449 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
9450 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
9452 chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
9454 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
9455 let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
9456 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
9457 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
9458 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
9460 // If swapped: this HTLC would be counted as dust when it shouldn't be.
9461 let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
9462 let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
9463 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
9464 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
9468 fn channel_reestablish_no_updates() {
9469 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9470 let logger = test_utils::TestLogger::new();
9471 let secp_ctx = Secp256k1::new();
9472 let seed = [42; 32];
9473 let network = Network::Testnet;
9474 let best_block = BestBlock::from_network(network);
9475 let chain_hash = ChainHash::using_genesis_block(network);
9476 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9478 // Go through the flow of opening a channel between two nodes.
9480 // Create Node A's channel pointing to Node B's pubkey
9481 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9482 let config = UserConfig::default();
9483 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9485 // Create Node B's channel by receiving Node A's open_channel message
9486 let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
9487 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9488 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
9490 // Node B --> Node A: accept channel
9491 let accept_channel_msg = node_b_chan.accept_inbound_channel();
9492 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
9494 // Node A --> Node B: funding created
9495 let output_script = node_a_chan.context.get_funding_redeemscript();
9496 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
9497 value: 10000000, script_pubkey: output_script.clone(),
9499 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9500 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
9501 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
9503 // Node B --> Node A: funding signed
9504 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
9505 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
9507 // Now disconnect the two nodes and check that the commitment point in
9508 // Node B's channel_reestablish message is sane.
9509 assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
9510 let msg = node_b_chan.get_channel_reestablish(&&logger);
9511 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
9512 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
9513 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
9515 // Check that the commitment point in Node A's channel_reestablish message is sane.
9517 assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
9518 let msg = node_a_chan.get_channel_reestablish(&&logger);
9519 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
9520 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
9521 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
9525 fn test_configured_holder_max_htlc_value_in_flight() {
9526 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9527 let logger = test_utils::TestLogger::new();
9528 let secp_ctx = Secp256k1::new();
9529 let seed = [42; 32];
9530 let network = Network::Testnet;
9531 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9532 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9533 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9535 let mut config_2_percent = UserConfig::default();
9536 config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
9537 let mut config_99_percent = UserConfig::default();
9538 config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
9539 let mut config_0_percent = UserConfig::default();
9540 config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
9541 let mut config_101_percent = UserConfig::default();
9542 config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
9544 // Test that `OutboundV1Channel::new` creates a channel with the correct value for
9545 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
9546 // which is set to the lower bound + 1 (2%) of the `channel_value`.
9547 let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
9548 let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
9549 assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
9551 // Test with the upper bound - 1 of valid values (99%).
9552 let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
9553 let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
9554 assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
9556 let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
9558 // Test that `InboundV1Channel::new` creates a channel with the correct value for
9559 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
9560 // which is set to the lower bound + 1 (2%) of the `channel_value`.
9561 let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9562 let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
9563 assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
9565 // Test with the upper bound - 1 of valid values (99%).
9566 let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9567 let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
9568 assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
9570 // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
9571 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
9572 let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
9573 let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
9574 assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
9576 // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
9577 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value than 100.
9579 let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
9580 let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
9581 assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
9583 // Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
9584 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
9585 let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9586 let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
9587 assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
9589 // Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
9590 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value than 100.
9592 let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9593 let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
9594 assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
9598 fn test_configured_holder_selected_channel_reserve_satoshis() {
9600 // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
9601 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
9602 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
9604 // Test with valid but unreasonably high channel reserves
9605 // The requesting and accepting parties ask for 49%/49% and 60%/30% channel reserves respectively.
9606 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
9607 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
9609 // Test with a calculated channel reserve below the lower bound,
9610 // i.e. `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
9611 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
9613 // Test with invalid channel reserves, since the sum of both is greater than or equal to the channel value.
9615 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
9616 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
9619 fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
9620 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
9621 let logger = test_utils::TestLogger::new();
9622 let secp_ctx = Secp256k1::new();
9623 let seed = [42; 32];
9624 let network = Network::Testnet;
9625 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9626 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9627 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9630 let mut outbound_node_config = UserConfig::default();
9631 outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
9632 let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();
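// The holder-selected reserve should equal the configured fraction of the channel value,
// floored at `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.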
9634 let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
9635 assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
9637 let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
9638 let mut inbound_node_config = UserConfig::default();
9639 inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
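// If the combined reserve percentages stay below 100% of the channel value, the inbound side
// should accept the channel; otherwise it must reject the counterparty's open_channel.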
9641 if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
9642 let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();
9644 let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
9646 assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
9647 assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
9649 // Channel negotiation should fail since the combined reserves meet or exceed the channel value.
9650 let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
9651 assert!(result.is_err());
9656 fn channel_update() {
9657 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9658 let logger = test_utils::TestLogger::new();
9659 let secp_ctx = Secp256k1::new();
9660 let seed = [42; 32];
9661 let network = Network::Testnet;
9662 let best_block = BestBlock::from_network(network);
9663 let chain_hash = ChainHash::using_genesis_block(network);
9664 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9666 // Create Node A's channel pointing to Node B's pubkey
9667 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9668 let config = UserConfig::default();
9669 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9671 // Create Node B's channel by receiving Node A's open_channel message
9672 // Make sure A's dust limit is as we expect.
9673 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9674 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9675 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
9677 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
9678 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
9679 accept_channel_msg.common_fields.dust_limit_satoshis = 546;
9680 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
9681 node_a_chan.context.holder_dust_limit_satoshis = 1560;
9683 // Node A --> Node B: funding created
9684 let output_script = node_a_chan.context.get_funding_redeemscript();
9685 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
9686 value: 10000000, script_pubkey: output_script.clone(),
9688 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9689 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
9690 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
9692 // Node B --> Node A: funding signed
9693 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
9694 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
9696 // Make sure that receiving a channel update will update the Channel as expected.
9697 let update = ChannelUpdate {
9698 contents: UnsignedChannelUpdate {
9700 short_channel_id: 0,
9703 cltv_expiry_delta: 100,
9704 htlc_minimum_msat: 5,
9705 htlc_maximum_msat: MAX_VALUE_MSAT,
9707 fee_proportional_millionths: 11,
9708 excess_data: Vec::new(),
9710 signature: Signature::from(unsafe { FFISignature::new() })
9712 assert!(node_a_chan.channel_update(&update).unwrap());
9714 // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
9715 // change our official htlc_minimum_msat.
9716 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
9717 match node_a_chan.context.counterparty_forwarding_info() {
9719 assert_eq!(info.cltv_expiry_delta, 100);
9720 assert_eq!(info.fee_base_msat, 110);
9721 assert_eq!(info.fee_proportional_millionths, 11);
9723 None => panic!("expected counterparty forwarding info to be Some")
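// Re-applying the identical update should report that nothing changed.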
9726 assert!(!node_a_chan.channel_update(&update).unwrap());
9730 fn blinding_point_skimmed_fee_malformed_ser() {
9731 // Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized properly.
9733 let logger = test_utils::TestLogger::new();
9734 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9735 let secp_ctx = Secp256k1::new();
9736 let seed = [42; 32];
9737 let network = Network::Testnet;
9738 let best_block = BestBlock::from_network(network);
9739 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9741 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9742 let config = UserConfig::default();
9743 let features = channelmanager::provided_init_features(&config);
9744 let mut outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(
9745 &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None
9747 let inbound_chan = InboundV1Channel::<&TestKeysInterface>::new(
9748 &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config),
9749 &features, &outbound_chan.get_open_channel(ChainHash::using_genesis_block(network)), 7, &config, 0, &&logger, false
9751 outbound_chan.accept_channel(&inbound_chan.get_accept_channel_message(), &config.channel_handshake_limits, &features).unwrap();
9752 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
9753 value: 10000000, script_pubkey: outbound_chan.context.get_funding_redeemscript(),
9755 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9756 let funding_created = outbound_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap().unwrap();
9757 let mut chan = match inbound_chan.funding_created(&funding_created, best_block, &&keys_provider, &&logger) {
9758 Ok((chan, _, _)) => chan,
9759 Err((_, e)) => panic!("{}", e),
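// Build a dummy outbound HTLC source and output so the pending-HTLC and holding-cell lists
// below contain every field we want to round-trip through serialization.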
9762 let dummy_htlc_source = HTLCSource::OutboundRoute {
9764 hops: vec![RouteHop {
9765 pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
9766 node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
9767 cltv_expiry_delta: 0, maybe_announced_channel: false,
9771 session_priv: test_utils::privkey(42),
9772 first_hop_htlc_msat: 0,
9773 payment_id: PaymentId([42; 32]),
9775 let dummy_outbound_output = OutboundHTLCOutput {
9778 payment_hash: PaymentHash([43; 32]),
9780 state: OutboundHTLCState::Committed,
9781 source: dummy_htlc_source.clone(),
9782 skimmed_fee_msat: None,
9783 blinding_point: None,
9785 let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
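// Give some of the dummy HTLCs blinding points and skimmed fees so both the Some and None
// cases are exercised by serialization.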
9786 for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
9788 htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
9791 htlc.skimmed_fee_msat = Some(1);
9794 chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();
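// Also queue a mix of Add/Claim/Fail/FailMalformed updates in the holding cell so every
// `HTLCUpdateAwaitingACK` variant is covered.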
9796 let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
9799 payment_hash: PaymentHash([43; 32]),
9800 source: dummy_htlc_source.clone(),
9801 onion_routing_packet: msgs::OnionPacket {
9803 public_key: Ok(test_utils::pubkey(1)),
9804 hop_data: [0; 20*65],
9807 skimmed_fee_msat: None,
9808 blinding_point: None,
9810 let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
9811 payment_preimage: PaymentPreimage([42; 32]),
9814 let dummy_holding_cell_failed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailHTLC {
9815 htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }
9817 let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC {
9818 htlc_id, failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32],
9820 let mut holding_cell_htlc_updates = Vec::with_capacity(12);
9823 holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
9824 } else if i % 5 == 1 {
9825 holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
9826 } else if i % 5 == 2 {
9827 let mut dummy_add = dummy_holding_cell_add_htlc.clone();
9828 if let HTLCUpdateAwaitingACK::AddHTLC {
9829 ref mut blinding_point, ref mut skimmed_fee_msat, ..
9830 } = &mut dummy_add {
9831 *blinding_point = Some(test_utils::pubkey(42 + i));
9832 *skimmed_fee_msat = Some(42);
9834 holding_cell_htlc_updates.push(dummy_add);
9835 } else if i % 5 == 3 {
9836 holding_cell_htlc_updates.push(dummy_holding_cell_malformed_htlc(i as u64));
9838 holding_cell_htlc_updates.push(dummy_holding_cell_failed_htlc(i as u64));
9841 chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();
9843 // Encode and decode the channel and ensure that the HTLCs within are the same.
9844 let encoded_chan = chan.encode();
9845 let mut s = crate::io::Cursor::new(&encoded_chan);
9846 let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
9847 let features = channelmanager::provided_channel_type_features(&config);
9848 let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
9849 assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
9850 assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
9853 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
9855 fn outbound_commitment_test() {
9856 use bitcoin::sighash;
9857 use bitcoin::consensus::encode::serialize;
9858 use bitcoin::sighash::EcdsaSighashType;
9859 use bitcoin::hashes::hex::FromHex;
9860 use bitcoin::hash_types::Txid;
9861 use bitcoin::secp256k1::Message;
9862 use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
9863 use crate::ln::PaymentPreimage;
9864 use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
9865 use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
9866 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
9867 use crate::util::logger::Logger;
9868 use crate::sync::Arc;
9869 use core::str::FromStr;
9870 use hex::DisplayHex;
9872 // Test vectors from BOLT 3 Appendices C and F (anchors):
9873 let feeest = TestFeeEstimator{fee_est: 15000};
9874 let logger : Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());
9875 let secp_ctx = Secp256k1::new();
9877 let mut signer = InMemorySigner::new(
9879 SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
9880 SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
9881 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
9882 SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
9883 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
9885 // These aren't set in the test vectors:
9886 [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
9892 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
9893 <Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
9894 let keys_provider = Keys { signer: signer.clone() };
9896 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9897 let mut config = UserConfig::default();
9898 config.channel_handshake_config.announced_channel = false;
9899 let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
9900 chan.context.holder_dust_limit_satoshis = 546;
9901 chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in during accept_channel
9903 let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
9905 let counterparty_pubkeys = ChannelPublicKeys {
9906 funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
9907 revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
9908 payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
9909 delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
9910 htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
9912 chan.context.channel_transaction_parameters.counterparty_parameters = Some(
9913 CounterpartyChannelTransactionParameters {
9914 pubkeys: counterparty_pubkeys.clone(),
9915 selected_contest_delay: 144
9917 chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
9918 signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
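// Sanity-check that the counterparty keys constructed above match the ones listed in the BOLT 3 test vectors.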
9920 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
9921 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
9923 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
9924 <Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
9926 assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
9927 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
9929 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
9930 // derived from a commitment_seed, so instead we copy it here and call
9931 // build_commitment_transaction.
9932 let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
9933 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
9934 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
9935 let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
9936 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
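// The macros below compare a locally built commitment transaction (and each of its HTLC
// transactions) against the BOLT 3 hex vectors: the counterparty signature must verify, our
// signature must match the vector, and the fully signed transactions must serialize byte-for-byte.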
9938 macro_rules! test_commitment {
9939 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
9940 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9941 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
9945 macro_rules! test_commitment_with_anchors {
9946 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
9947 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9948 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
9952 macro_rules! test_commitment_common {
9953 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
9954 $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
9956 let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
9957 let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
9959 let htlcs = commitment_stats.htlcs_included.drain(..)
9960 .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
9962 (commitment_stats.tx, htlcs)
9964 let trusted_tx = commitment_tx.trust();
9965 let unsigned_tx = trusted_tx.built_transaction();
9966 let redeemscript = chan.context.get_funding_redeemscript();
9967 let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
9968 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
9969 log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
9970 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
9972 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
9973 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
9974 let mut counterparty_htlc_sigs = Vec::new();
9975 counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
9977 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
9978 per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
9979 counterparty_htlc_sigs.push(remote_signature);
9981 assert_eq!(htlcs.len(), per_htlc.len());
9983 let holder_commitment_tx = HolderCommitmentTransaction::new(
9984 commitment_tx.clone(),
9985 counterparty_signature,
9986 counterparty_htlc_sigs,
9987 &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
9988 chan.context.counterparty_funding_pubkey()
9990 let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
9991 assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
9993 let funding_redeemscript = chan.context.get_funding_redeemscript();
9994 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
9995 assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");
9997 // ((htlc, counterparty_sig), (index, holder_sig))
9998 let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();
10001 log_trace!(logger, "verifying htlc {}", $htlc_idx);
10002 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
10004 let ref htlc = htlcs[$htlc_idx];
10005 let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
10006 chan.context.get_counterparty_selected_contest_delay().unwrap(),
10007 &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
10008 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
10009 let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
10010 let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
10011 assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");
10013 let mut preimage: Option<PaymentPreimage> = None;
10016 let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
10017 if out == htlc.payment_hash {
10018 preimage = Some(PaymentPreimage([i; 32]));
10022 assert!(preimage.is_some());
10025 let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
10026 let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
10027 channel_derivation_parameters: ChannelDerivationParameters {
10028 value_satoshis: chan.context.channel_value_satoshis,
10029 keys_id: chan.context.channel_keys_id,
10030 transaction_parameters: chan.context.channel_transaction_parameters.clone(),
10032 commitment_txid: trusted_tx.txid(),
10033 per_commitment_number: trusted_tx.commitment_number(),
10034 per_commitment_point: trusted_tx.per_commitment_point(),
10035 feerate_per_kw: trusted_tx.feerate_per_kw(),
10036 htlc: htlc.clone(),
10037 preimage: preimage.clone(),
10038 counterparty_sig: *htlc_counterparty_sig,
10039 }, &secp_ctx).unwrap();
10040 let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
10041 assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
10043 let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
10044 assert_eq!(signature, htlc_holder_sig, "htlc sig");
10045 let trusted_tx = holder_commitment_tx.trust();
10046 htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
10047 log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
10048 assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
10050 assert!(htlc_counterparty_sig_iter.next().is_none());
10054 // anchors: simple commitment tx with no HTLCs and single anchor
10055 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
10056 "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
10057 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10059 // simple commitment tx with no HTLCs
10060 chan.context.value_to_self_msat = 7000000000;
10062 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
10063 "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
10064 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10066 // anchors: simple commitment tx with no HTLCs
10067 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
10068 "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
10069 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10071 chan.context.pending_inbound_htlcs.push({
10072 let mut out = InboundHTLCOutput{
10074 amount_msat: 1000000,
10076 payment_hash: PaymentHash([0; 32]),
10077 state: InboundHTLCState::Committed,
10079 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
10082 chan.context.pending_inbound_htlcs.push({
10083 let mut out = InboundHTLCOutput{
10085 amount_msat: 2000000,
10087 payment_hash: PaymentHash([0; 32]),
10088 state: InboundHTLCState::Committed,
10090 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
10093 chan.context.pending_outbound_htlcs.push({
10094 let mut out = OutboundHTLCOutput{
10096 amount_msat: 2000000,
10098 payment_hash: PaymentHash([0; 32]),
10099 state: OutboundHTLCState::Committed,
10100 source: HTLCSource::dummy(),
10101 skimmed_fee_msat: None,
10102 blinding_point: None,
10104 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
10107 chan.context.pending_outbound_htlcs.push({
10108 let mut out = OutboundHTLCOutput{
10110 amount_msat: 3000000,
10112 payment_hash: PaymentHash([0; 32]),
10113 state: OutboundHTLCState::Committed,
10114 source: HTLCSource::dummy(),
10115 skimmed_fee_msat: None,
10116 blinding_point: None,
10118 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
10121 chan.context.pending_inbound_htlcs.push({
10122 let mut out = InboundHTLCOutput{
10124 amount_msat: 4000000,
10126 payment_hash: PaymentHash([0; 32]),
10127 state: InboundHTLCState::Committed,
10129 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
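// Five HTLCs are now pending (three inbound, two outbound), matching the untrimmed-HTLC vectors below.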
10133 // commitment tx with all five HTLCs untrimmed (minimum feerate)
10134 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10135 chan.context.feerate_per_kw = 0;
10137 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
10138 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
10139 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10142 "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
10143 "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
10144 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
10147 "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
10148 "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
10149 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10152 "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
10153 "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
10154 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
10157 "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
10158 "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
10159 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10162 "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
10163 "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
10164 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10167 // commitment tx with seven outputs untrimmed (maximum feerate)
10168 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10169 chan.context.feerate_per_kw = 647;
10171 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
10172 "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
10173 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10176 "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
10177 "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
10178 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
10181 "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
10182 "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
10183 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10186 "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
10187 "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
10188 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
10191 "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
10192 "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
10193 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10196 "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
10197 "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
10198 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10201 // commitment tx with six outputs untrimmed (minimum feerate)
10202 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10203 chan.context.feerate_per_kw = 648;
10205 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
10206 "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
10207 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10210 "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
10211 "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
10212 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10215 "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
10216 "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
10217 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
10220 "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
10221 "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
10222 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10225 "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
10226 "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
10227 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10230 // anchors: commitment tx with six outputs untrimmed (minimum dust limit)
10231 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10232 chan.context.feerate_per_kw = 645;
10233 chan.context.holder_dust_limit_satoshis = 1001;
10235 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
10236 "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
10237 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10240 "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
10241 "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
10242 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
10245 "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
10246 "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
10247 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
10250 "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
10251 "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
10252 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
10255 "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
10256 "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
10257 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
10260 // commitment tx with six outputs untrimmed (maximum feerate)
10261 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10262 chan.context.feerate_per_kw = 2069;
10263 chan.context.holder_dust_limit_satoshis = 546;
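// Per BOLT 3, an HTLC output is "trimmed" from the commitment transaction when its amount,
// less the fee of its second-stage HTLC-success/HTLC-timeout transaction at the current
// feerate, falls below the dust limit. 2069 sat/kW is the highest feerate at which the
// 2000 sat received HTLC still clears that bar (the 1000 sat HTLC is already trimmed here);
// at 2070 sat/kW it is trimmed as well, which is what the five-output vectors below exercise.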
10265 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
10266 "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
10267 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10270 "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
10271 "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
10272 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10275 "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
10276 "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
10277 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
10280 "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
10281 "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
10282 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10285 "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
10286 "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
10287 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10290 // commitment tx with five outputs untrimmed (minimum feerate)
10291 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10292 chan.context.feerate_per_kw = 2070;
10294 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
10295 "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
10296 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10299 "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
10300 "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
10301 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10304 "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
10305 "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
10306 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10309 "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
10310 "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
10311 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10314 // commitment tx with five outputs untrimmed (maximum feerate)
10315 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10316 chan.context.feerate_per_kw = 2194;
10318 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
10319 "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
10320 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10323 "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
10324 "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
10325 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10328 "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
10329 "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
10330 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10333 "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
10334 "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
10335 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10338 // commitment tx with four outputs untrimmed (minimum feerate)
10339 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10340 chan.context.feerate_per_kw = 2195;
10342 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
10343 "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
10344 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10347 "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
10348 "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
10349 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10352 "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
10353 "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
10354 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10357 // anchors: commitment tx with four outputs untrimmed (minimum dust limit)
10358 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10359 chan.context.feerate_per_kw = 2185;
10360 chan.context.holder_dust_limit_satoshis = 2001;
10361 let cached_channel_type = chan.context.channel_type.clone();
10362 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
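// The "anchors:" variants re-run the scenario with the `anchors_zero_fee_htlc_tx` channel
// type, where second-stage HTLC transactions carry no fee, so trimming depends only on the
// (deliberately raised) dust limit. The pre-anchors channel type is stashed in
// `cached_channel_type` and restored once the anchors vector has been checked.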
10364 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
10365 "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
10366 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10369 "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
10370 "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
10371 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
10374 "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
10375 "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
10376 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
10379 // commitment tx with four outputs untrimmed (maximum feerate)
10380 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10381 chan.context.feerate_per_kw = 3702;
10382 chan.context.holder_dust_limit_satoshis = 546;
10383 chan.context.channel_type = cached_channel_type.clone();
10385 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
10386 "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
10387 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10390 "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
10391 "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
10392 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10395 "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
10396 "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
10397 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10400 // commitment tx with three outputs untrimmed (minimum feerate)
10401 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10402 chan.context.feerate_per_kw = 3703;
10404 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
10405 "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
10406 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10409 "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
10410 "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
10411 "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10414 // anchors: commitment tx with three outputs untrimmed (minimum dust limit)
10415 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10416 chan.context.feerate_per_kw = 3687;
10417 chan.context.holder_dust_limit_satoshis = 3001;
10418 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10420 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
10421 "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
10422 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10425 "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
10426 "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
10427 "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
10430 // commitment tx with three outputs untrimmed (maximum feerate)
10431 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10432 chan.context.feerate_per_kw = 4914;
10433 chan.context.holder_dust_limit_satoshis = 546;
10434 chan.context.channel_type = cached_channel_type.clone();
10436 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
10437 "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
10438 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10441 "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
10442 "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
10443 "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10446 // commitment tx with two outputs untrimmed (minimum feerate)
10447 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10448 chan.context.feerate_per_kw = 4915;
10449 chan.context.holder_dust_limit_satoshis = 546;
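// At 4915 sat/kW even the largest (4000 sat) HTLC is trimmed, so only the to_local and
// to_remote outputs remain and the HTLC list passed to the macro is empty.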
10451 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
10452 "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
10453 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10455 // anchors: commitment tx with two outputs untrimmed (minimum dust limit)
10456 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10457 chan.context.feerate_per_kw = 4894;
10458 chan.context.holder_dust_limit_satoshis = 4001;
10459 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10461 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
10462 "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
10463 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10465 // commitment tx with two outputs untrimmed (maximum feerate)
10466 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10467 chan.context.feerate_per_kw = 9651180;
10468 chan.context.holder_dust_limit_satoshis = 546;
10469 chan.context.channel_type = cached_channel_type.clone();
10471 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
10472 "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
10473 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10475 // commitment tx with one output untrimmed (minimum feerate)
10476 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10477 chan.context.feerate_per_kw = 9651181;
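// At this feerate the funder's to_local balance, net of the commitment fee, drops below the
// dust limit as well, leaving only the 3,000,000 sat to_remote output.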
10479 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
10480 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
10481 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10483 // anchors: commitment tx with one output untrimmed (minimum dust limit)
10484 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10485 chan.context.feerate_per_kw = 6216010;
10486 chan.context.holder_dust_limit_satoshis = 4001;
10487 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10489 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
10490 "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
10491 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10493 // commitment tx with fee greater than funder amount
10494 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10495 chan.context.feerate_per_kw = 9651936;
10496 chan.context.holder_dust_limit_satoshis = 546;
10497 chan.context.channel_type = cached_channel_type;
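// The expected transaction and signatures here are identical to the one-output case above:
// the funder cannot contribute more than its remaining balance towards fees, so raising
// feerate_per_kw further no longer changes the commitment.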
10499 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
10500 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
10501 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10503 // commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
10504 chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
10505 chan.context.feerate_per_kw = 253;
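// The two offered HTLCs below differ by only 1 msat and share a payment preimage, so they
// produce identical 5000 sat outputs in the commitment transaction; per BOLT 3, outputs with
// the same amount and script are ordered by their HTLCs' cltv_expiry.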
10506 chan.context.pending_inbound_htlcs.clear();
10507 chan.context.pending_inbound_htlcs.push({
10508 let mut out = InboundHTLCOutput{
10510 amount_msat: 2000000,
10512 payment_hash: PaymentHash([0; 32]),
10513 state: InboundHTLCState::Committed,
10515 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
10518 chan.context.pending_outbound_htlcs.clear();
10519 chan.context.pending_outbound_htlcs.push({
10520 let mut out = OutboundHTLCOutput{
10522 amount_msat: 5000001,
10524 payment_hash: PaymentHash([0; 32]),
10525 state: OutboundHTLCState::Committed,
10526 source: HTLCSource::dummy(),
10527 skimmed_fee_msat: None,
10528 blinding_point: None,
10530 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
10533 chan.context.pending_outbound_htlcs.push({
10534 let mut out = OutboundHTLCOutput{
10536 amount_msat: 5000000,
10538 payment_hash: PaymentHash([0; 32]),
10539 state: OutboundHTLCState::Committed,
10540 source: HTLCSource::dummy(),
10541 skimmed_fee_msat: None,
10542 blinding_point: None,
10544 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
10548 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
10549 "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
10550 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10553 "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
10554 "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
10555 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
10557 "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
10558 "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
10559 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
10561 "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
10562 "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
10563 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
10566 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10567 test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
10568 "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
10569 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10572 "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
10573 "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
10574 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
10576 "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
10577 "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
10578 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
10580 "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
10581 "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
10582 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
10587 fn test_per_commitment_secret_gen() {
10588 // Test vectors from BOLT 3 Appendix D:
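// The secrets are derived by starting from the seed, walking the 48-bit index from its most
// significant bit down, and, for every bit that is set, flipping that bit in the running
// value and hashing it with SHA256. A minimal illustrative sketch of that algorithm (the
// code under test is chan_utils::build_commitment_secret; the helper name below is ours and
// exists only for reference):
#[allow(dead_code)]
fn build_commitment_secret_sketch(seed: &[u8; 32], idx: u64) -> [u8; 32] {
	let mut res = *seed;
	for bitpos in (0..48u64).rev() {
		if idx & (1 << bitpos) != 0 {
			// Flip bit `bitpos` of the running value, then hash the result.
			res[(bitpos / 8) as usize] ^= 1 << (bitpos & 7);
			res = Sha256::hash(&res).to_byte_array();
		}
	}
	res
}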
10590 let mut seed = [0; 32];
10591 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
10592 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
10593 <Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);
10595 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
10596 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
10597 <Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);
10599 assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
10600 <Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);
10602 assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
10603 <Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);
10605 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
10606 assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
10607 <Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
10611 fn test_key_derivation() {
10612 // Test vectors from BOLT 3 Appendix E:
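// The derivations checked below, per BOLT 3 Appendix E (scalar arithmetic mod the secp256k1
// group order; hashes are SHA256 over the concatenated compressed points):
//   pubkey            = basepoint + SHA256(per_commitment_point || basepoint) * G
//   privkey           = basepoint_secret + SHA256(per_commitment_point || basepoint)
//   revocationpubkey  = revocation_basepoint * SHA256(revocation_basepoint || per_commitment_point)
//                       + per_commitment_point * SHA256(per_commitment_point || revocation_basepoint)
//   revocationprivkey = revocation_basepoint_secret * SHA256(revocation_basepoint || per_commitment_point)
//                       + per_commitment_secret * SHA256(per_commitment_point || revocation_basepoint)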
10613 let secp_ctx = Secp256k1::new();
10615 let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
10616 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
10618 let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
10619 assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);
10621 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
10622 assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
10624 assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
10625 SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());
10627 assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
10628 <Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);
10630 assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
10631 SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
10635 fn test_zero_conf_channel_type_support() {
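// A counterparty may request the zero-conf channel type in its open_channel. Build such a
// message by hand and check that an inbound channel can still be constructed from it.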
10636 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
10637 let secp_ctx = Secp256k1::new();
10638 let seed = [42; 32];
10639 let network = Network::Testnet;
10640 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
10641 let logger = test_utils::TestLogger::new();
10643 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
10644 let config = UserConfig::default();
10645 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
10646 node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
10648 let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
10649 channel_type_features.set_zero_conf_required();
10651 let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
10652 open_channel_msg.common_fields.channel_type = Some(channel_type_features);
10653 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
10654 let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
10655 node_b_node_id, &channelmanager::provided_channel_type_features(&config),
10656 &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
10657 assert!(res.is_ok());
10661 fn test_supports_anchors_zero_htlc_tx_fee() {
10662 // Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
10663 // resulting `channel_type`.
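// The initiator proposes a `channel_type` in `open_channel` based on its own config and the
// features the peer advertised. With `negotiate_anchors_zero_fee_htlc_tx` enabled on both
// sides, the proposed and accepted type should be `static_remote_key` plus
// `anchors_zero_fee_htlc_tx`, as asserted at the end of this test.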
10664 let secp_ctx = Secp256k1::new();
10665 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
10666 let network = Network::Testnet;
10667 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
10668 let logger = test_utils::TestLogger::new();
10670 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
10671 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
10673 let mut config = UserConfig::default();
10674 config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
10676 // It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`;
10677 // both sides need to signal it.
10678 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
10679 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
10680 &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
10681 &config, 0, 42, None
10683 assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());
10685 let mut expected_channel_type = ChannelTypeFeatures::empty();
10686 expected_channel_type.set_static_remote_key_required();
10687 expected_channel_type.set_anchors_zero_fee_htlc_tx_required();
10689 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
10690 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
10691 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
10695 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
10696 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
10697 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
10698 &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
10699 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
10702 assert_eq!(channel_a.context.channel_type, expected_channel_type);
10703 assert_eq!(channel_b.context.channel_type, expected_channel_type);
10707 fn test_rejects_implicit_simple_anchors() {
10708 // Tests that if `option_anchors` is being negotiated implicitly through the intersection of
10709 // each side's `InitFeatures`, it is rejected.
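// When `open_channel` carries no explicit `channel_type`, the type is implied by the
// intersection of both nodes' `init` features. LDK does not support the legacy
// `option_anchors` (anchors with HTLC-transaction fees), so an implied legacy-anchors
// channel should be refused rather than silently downgraded.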
10710 let secp_ctx = Secp256k1::new();
10711 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
10712 let network = Network::Testnet;
10713 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
10714 let logger = test_utils::TestLogger::new();
10716 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
10717 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
10719 let config = UserConfig::default();
10721 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
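// Per BOLT 9, bit 12 is the even ("required") bit for `option_static_remotekey` and bit 20
// the even bit for `option_anchors`. Building the raw feature vector by hand lets the test
// advertise legacy anchors support, which LDK itself never signals.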
10722 let static_remote_key_required: u64 = 1 << 12;
10723 let simple_anchors_required: u64 = 1 << 20;
10724 let raw_init_features = static_remote_key_required | simple_anchors_required;
10725 let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
10727 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
10728 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
10729 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
10733 // Set `channel_type` to `None` to force the implicit feature negotiation.
10734 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
10735 open_channel_msg.common_fields.channel_type = None;
10737 // A supports both `static_remote_key` and `option_anchors`, but B only accepts
10738 // `static_remote_key`, so B will reject the channel.
10739 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
10740 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
10741 &channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
10742 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
10744 assert!(channel_b.is_err());
10748 fn test_rejects_simple_anchors_channel_type() {
10749 // Tests that if `option_anchors` is being negotiated through the `channel_type` feature, it is rejected.
10751 let secp_ctx = Secp256k1::new();
10752 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
10753 let network = Network::Testnet;
10754 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
10755 let logger = test_utils::TestLogger::new();
10757 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
10758 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
10760 let config = UserConfig::default();
10762 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
10763 let static_remote_key_required: u64 = 1 << 12;
10764 let simple_anchors_required: u64 = 1 << 20;
10765 let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
10766 let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
10767 let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
10768 assert!(!simple_anchors_init.requires_unknown_bits());
10769 assert!(!simple_anchors_channel_type.requires_unknown_bits());
10771 // First, we'll try to open a channel between A and B where A requests a channel type for
10772 // the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
10773 // B as it's not supported by LDK.
10774 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
10775 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
10776 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
10780 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
10781 open_channel_msg.common_fields.channel_type = Some(simple_anchors_channel_type.clone());
10783 let res = InboundV1Channel::<&TestKeysInterface>::new(
10784 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
10785 &channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
10786 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
10788 assert!(res.is_err());
10790 // Then, we'll try to open another channel where A requests a channel type for
10791 // `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
10792 // original `option_anchors` feature, which should be rejected by A as it's not supported by LDK.
10794 let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
10795 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
10796 10000000, 100000, 42, &config, 0, 42, None
10799 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
10801 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
10802 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
10803 &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
10804 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
10807 let mut accept_channel_msg = channel_b.get_accept_channel_message();
10808 accept_channel_msg.common_fields.channel_type = Some(simple_anchors_channel_type.clone());
10810 let res = channel_a.accept_channel(
10811 &accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
10813 assert!(res.is_err());
10817 fn test_waiting_for_batch() {
10818 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
10819 let logger = test_utils::TestLogger::new();
10820 let secp_ctx = Secp256k1::new();
10821 let seed = [42; 32];
10822 let network = Network::Testnet;
10823 let best_block = BestBlock::from_network(network);
10824 let chain_hash = ChainHash::using_genesis_block(network);
10825 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
10827 let mut config = UserConfig::default();
10828 // Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
10829 // channel in a batch before all channels are ready.
10830 config.channel_handshake_limits.trust_own_funding_0conf = true;
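// `trust_own_funding_0conf` means we treat funding transactions we created ourselves as
// trusted at zero confirmations, so `channel_ready` could in principle be sent immediately;
// this test checks that batch funding still defers it until the whole batch is ready.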
10832 // Create a channel from node a to node b that will be part of batch funding.
10833 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
10834 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
10839 &channelmanager::provided_init_features(&config),
10849 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
10850 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
10851 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
10856 &channelmanager::provided_channel_type_features(&config),
10857 &channelmanager::provided_init_features(&config),
10863 true, // Allow node b to send a 0conf channel_ready.
10866 let accept_channel_msg = node_b_chan.accept_inbound_channel();
10867 node_a_chan.accept_channel(
10868 &accept_channel_msg,
10869 &config.channel_handshake_limits,
10870 &channelmanager::provided_init_features(&config),
10873 // Fund the channel with a batch funding transaction.
10874 let output_script = node_a_chan.context.get_funding_redeemscript();
10875 let tx = Transaction {
10877 lock_time: LockTime::ZERO,
10881 value: 10000000, script_pubkey: output_script.clone(),
10884 value: 10000000, script_pubkey: Builder::new().into_script(),
10887 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
10888 let funding_created_msg = node_a_chan.get_funding_created(
10889 tx.clone(), funding_outpoint, true, &&logger,
10890 ).map_err(|_| ()).unwrap();
10891 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
10892 &funding_created_msg.unwrap(),
10896 ).map_err(|_| ()).unwrap();
10897 let node_b_updates = node_b_chan.monitor_updating_restored(
10905 // Receive funding_signed, but the channel will be configured to hold sending channel_ready and
10906 // broadcasting the funding transaction until the batch is ready.
10907 let res = node_a_chan.funding_signed(
10908 &funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger,
10910 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
10911 let node_a_updates = node_a_chan.monitor_updating_restored(
10918 // Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set, as the
10919 // shared funding transaction cannot be broadcast until every channel in the batch is ready.
10920 assert!(node_a_updates.channel_ready.is_none());
10921 assert!(node_a_updates.funding_broadcastable.is_none());
10922 assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
10924 // It is possible to receive a 0conf channel_ready from the remote node.
10925 node_a_chan.channel_ready(
10926 &node_b_updates.channel_ready.unwrap(),
10934 node_a_chan.context.channel_state,
10935 ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH | AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)
10938 // WAITING_FOR_BATCH is only cleared once the ChannelManager signals that the whole batch is ready, by calling set_batch_ready.
10939 node_a_chan.set_batch_ready();
10940 assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
10941 assert!(node_a_chan.check_get_channel_ready(0).is_some());