// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
use bitcoin::blockdata::transaction::Transaction;

use bitcoin::sighash::EcdsaSighashType;
use bitcoin::consensus::encode;

use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256d;
use bitcoin::hash_types::{Txid, BlockHash};

use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
use bitcoin::secp256k1::{PublicKey, SecretKey};
use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
use bitcoin::secp256k1;
use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
use crate::ln::msgs::DecodeError;
use crate::ln::script::{self, ShutdownScript};
use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
use crate::ln::chan_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
use crate::events::ClosureReason;
use crate::routing::gossip::NodeId;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use crate::util::logger::{Logger, Record, WithContext};
use crate::util::errors::APIError;
use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
use crate::util::scid_utils::scid_from_parts;

use crate::prelude::*;
use core::{cmp, mem, fmt};
use core::convert::TryInto;
use core::ops::Deref;
#[cfg(any(test, fuzzing, debug_assertions))]
use crate::sync::Mutex;
use crate::sign::type_resolver::ChannelSignerType;

use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};
pub struct ChannelValueStat {
	pub value_to_self_msat: u64,
	pub channel_value_msat: u64,
	pub channel_reserve_msat: u64,
	pub pending_outbound_htlcs_amount_msat: u64,
	pub pending_inbound_htlcs_amount_msat: u64,
	pub holding_cell_outbound_amount_msat: u64,
	pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
	pub counterparty_dust_limit_msat: u64,
}
pub struct AvailableBalances {
	/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
	pub balance_msat: u64,
	/// Total amount available for our counterparty to send to us.
	pub inbound_capacity_msat: u64,
	/// Total amount available for us to send to our counterparty.
	pub outbound_capacity_msat: u64,
	/// The maximum value we can assign to the next outbound HTLC.
	pub next_outbound_htlc_limit_msat: u64,
	/// The minimum value we can assign to the next outbound HTLC.
	pub next_outbound_htlc_minimum_msat: u64,
}
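
// Illustrative sketch (hypothetical numbers, not derived from a real channel):
// how the fields above relate to one another. `next_outbound_htlc_limit_msat`
// can never exceed `outbound_capacity_msat`, which in turn can never exceed
// `balance_msat` (the capacity already excludes the reserve we must maintain).
#[cfg(test)]
mod available_balances_example {
	use super::AvailableBalances;

	#[test]
	fn field_relationships() {
		let balances = AvailableBalances {
			balance_msat: 600_000,
			inbound_capacity_msat: 350_000,
			// Our balance less the channel reserve we are required to keep.
			outbound_capacity_msat: 540_000,
			// Further capped by the counterparty's `max_htlc_value_in_flight_msat`.
			next_outbound_htlc_limit_msat: 250_000,
			next_outbound_htlc_minimum_msat: 1,
		};
		assert!(balances.next_outbound_htlc_limit_msat <= balances.outbound_capacity_msat);
		assert!(balances.outbound_capacity_msat <= balances.balance_msat);
	}
}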
#[derive(Debug, Clone, Copy, PartialEq)]
enum FeeUpdateState {
	// Inbound states mirroring InboundHTLCState
	RemoteAnnounced,
	AwaitingRemoteRevokeToAnnounce,
	// Note that we do not have an `AwaitingAnnouncedRemoteRevoke` variant here as it is universally
	// handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
	// distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
	// the fee update anywhere, we can simply consider the fee update `Committed` immediately
	// instead of setting it to `AwaitingAnnouncedRemoteRevoke`.

	// Outbound state can only be `LocalAnnounced` or `Committed`
	Outbound,
}
enum InboundHTLCRemovalReason {
	FailRelay(msgs::OnionErrorPacket),
	FailMalformed(([u8; 32], u16)),
	Fulfill(PaymentPreimage),
}
enum InboundHTLCState {
	/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
	/// update_add_htlc message for this HTLC.
	RemoteAnnounced(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've
	/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
	/// state (see the example below). We have not yet included this HTLC in a
	/// commitment_signed message because we are waiting on the remote's
	/// aforementioned state revocation. One reason this missing remote RAA
	/// (revoke_and_ack) blocks us from constructing a commitment_signed message
	/// is because every time we create a new "state", i.e. every time we sign a
	/// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
	/// which are provided one-at-a-time in each RAA. E.g., the last RAA they
	/// sent provided the per_commitment_point for our current commitment tx.
	/// The other reason we should not send a commitment_signed without their RAA
	/// is because their RAA serves to ACK our previous commitment_signed.
	///
	/// Here's an example of how an HTLC could come to be in this state:
	/// remote --> update_add_htlc(prev_htlc) --> local
	/// remote --> commitment_signed(prev_htlc) --> local
	/// remote <-- revoke_and_ack <-- local
	/// remote <-- commitment_signed(prev_htlc) <-- local
	/// [note that here, the remote does not respond with a RAA]
	/// remote --> update_add_htlc(this_htlc) --> local
	/// remote --> commitment_signed(prev_htlc, this_htlc) --> local
	/// Now `this_htlc` will be assigned this state. It's unable to be officially
	/// accepted, i.e. included in a commitment_signed, because we're missing the
	/// RAA that provides our next per_commitment_point. The per_commitment_point
	/// is used to derive commitment keys, which are used to construct the
	/// signatures in a commitment_signed message.
	/// Implies AwaitingRemoteRevoke.
	///
	/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
	AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
	/// We have also included this HTLC in our latest commitment_signed and are now just waiting
	/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
	/// channel (before it can then get forwarded and/or removed).
	/// Implies AwaitingRemoteRevoke.
	AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
	Committed,
	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we'll drop it.
	/// Note that we have to keep an eye on the HTLC until we've received a broadcastable
	/// commitment transaction without it as otherwise we'll have to force-close the channel to
	/// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
	/// anyway). That said, ChannelMonitor does this for us (see
	/// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
	/// our own local state before then, once we're sure that the next commitment_signed and
	/// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
	LocalRemoved(InboundHTLCRemovalReason),
}
/// Exposes the state of pending inbound HTLCs.
///
/// At a high level, an HTLC being forwarded from one Lightning node to another Lightning node goes
/// through the following states in the state machine:
/// - Announced for addition by the originating node through the update_add_htlc message.
/// - Added to the commitment transaction of the receiving node and originating node in turn
///   through the exchange of commitment_signed and revoke_and_ack messages.
/// - Announced for resolution (fulfillment or failure) by the receiving node through either one of
///   the update_fulfill_htlc, update_fail_htlc, and update_fail_malformed_htlc messages.
/// - Removed from the commitment transaction of the originating node and receiving node in turn
///   through the exchange of commitment_signed and revoke_and_ack messages.
///
/// This can be used to inspect what next message an HTLC is waiting for to advance its state.
#[derive(Clone, Debug, PartialEq)]
pub enum InboundHTLCStateDetails {
	/// We have added this HTLC in our commitment transaction by receiving commitment_signed and
	/// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
	/// before this HTLC is included on the remote commitment transaction.
	AwaitingRemoteRevokeToAdd,
	/// This HTLC has been included in the commitment_signed and revoke_and_ack messages on both sides
	/// and is included in both commitment transactions.
	///
	/// This HTLC is now safe to either forward or be claimed as a payment by us. The HTLC will
	/// remain in this state until the forwarded upstream HTLC has been resolved and we resolve this
	/// HTLC correspondingly, or until we claim it as a payment. If it is part of a multipart
	/// payment, it will only be claimed together with other required parts.
	Committed,
	/// We have received the preimage for this HTLC and it is being removed by fulfilling it with
	/// update_fulfill_htlc. This HTLC is still on both commitment transactions, but we are awaiting
	/// the appropriate revoke_and_ack's from the remote before this HTLC is removed from the remote
	/// commitment transaction after update_fulfill_htlc.
	AwaitingRemoteRevokeToRemoveFulfill,
	/// The HTLC is being removed by failing it with update_fail_htlc or update_fail_malformed_htlc.
	/// This HTLC is still on both commitment transactions, but we are awaiting the appropriate
	/// revoke_and_ack's from the remote before this HTLC is removed from the remote commitment
	/// transaction.
	AwaitingRemoteRevokeToRemoveFail,
}
impl From<&InboundHTLCState> for Option<InboundHTLCStateDetails> {
	fn from(state: &InboundHTLCState) -> Option<InboundHTLCStateDetails> {
		match state {
			InboundHTLCState::RemoteAnnounced(_) => None,
			InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
			InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
			InboundHTLCState::Committed =>
				Some(InboundHTLCStateDetails::Committed),
			InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(_)) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail),
			InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(_)) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail),
			InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill),
		}
	}
}
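
// Illustrative sketch: exercising the mapping above. `Committed` maps straight
// through, while a locally-fulfilled HTLC is reported as awaiting the remote's
// revoke_and_ack for the removal. The zeroed preimage is a placeholder value.
#[cfg(test)]
mod inbound_htlc_state_details_example {
	use super::*;

	#[test]
	fn state_to_details_mapping() {
		assert_eq!(Option::<InboundHTLCStateDetails>::from(&InboundHTLCState::Committed),
			Some(InboundHTLCStateDetails::Committed));
		let fulfilled = InboundHTLCState::LocalRemoved(
			InboundHTLCRemovalReason::Fulfill(PaymentPreimage([0; 32])));
		assert_eq!(Option::<InboundHTLCStateDetails>::from(&fulfilled),
			Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill));
	}
}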
impl_writeable_tlv_based_enum_upgradable!(InboundHTLCStateDetails,
	(0, AwaitingRemoteRevokeToAdd) => {},
	(2, Committed) => {},
	(4, AwaitingRemoteRevokeToRemoveFulfill) => {},
	(6, AwaitingRemoteRevokeToRemoveFail) => {};
);
struct InboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: InboundHTLCState,
}
/// Exposes details around pending inbound HTLCs.
#[derive(Clone, Debug, PartialEq)]
pub struct InboundHTLCDetails {
	/// The HTLC ID.
	/// The IDs are incremented by 1 starting from 0 for each offered HTLC.
	/// They are unique per channel and inbound/outbound direction, unless an HTLC was only announced
	/// and not part of any commitment transaction.
	pub htlc_id: u64,
	/// The amount in msat.
	pub amount_msat: u64,
	/// The block height at which this HTLC expires.
	pub cltv_expiry: u32,
	/// The payment hash.
	pub payment_hash: PaymentHash,
	/// The state of the HTLC in the state machine.
	///
	/// Determines on which commitment transactions the HTLC is included and what message the HTLC is
	/// waiting for to advance to the next state.
	///
	/// See [`InboundHTLCStateDetails`] for information on the specific states.
	///
	/// LDK will always fill this field in, but when downgrading to prior versions of LDK, new
	/// states may result in `None` here.
	pub state: Option<InboundHTLCStateDetails>,
	/// Whether the HTLC has an output below the local dust limit. If so, the output will be trimmed
	/// from the local commitment transaction and added to the commitment transaction fee.
	/// For non-anchor channels, this takes into account the cost of the second-stage HTLC
	/// transactions as well.
	///
	/// When the local commitment transaction is broadcasted as part of a unilateral closure,
	/// the value of this HTLC will therefore not be claimable but instead burned as a transaction
	/// fee.
	///
	/// Note that dust limits are specific to each party. An HTLC can be dust for the local
	/// commitment transaction but not for the counterparty's commitment transaction and vice versa.
	pub is_dust: bool,
}
impl_writeable_tlv_based!(InboundHTLCDetails, {
	(0, htlc_id, required),
	(2, amount_msat, required),
	(4, cltv_expiry, required),
	(6, payment_hash, required),
	(7, state, upgradable_option),
	(8, is_dust, required),
});
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum OutboundHTLCState {
	/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we will promote to Committed (note that they may not accept it until the next time we
	/// revoke, but we don't really care about that:
	/// * they've revoked, so worst case we can announce an old state and get our (option on)
	///   money back (though we won't), and,
	/// * we'll send them a revoke when they send a commitment_signed, and since only they're
	///   allowed to remove it, the "can only be removed once committed on both sides" requirement
	///   doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
	///   we'll never get out of sync).
	/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
	/// OutboundHTLCOutput's size just for a temporary bit
	LocalAnnounced(Box<msgs::OnionPacket>),
	Committed,
	/// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
	/// the change (though they'll need to revoke before we fail the payment).
	RemoteRemoved(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
	/// remote revoke_and_ack on a previous state before we can do so.
	AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
	/// revoke_and_ack to drop completely.
	AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
}
/// Exposes the state of pending outbound HTLCs.
///
/// At a high level, an HTLC being forwarded from one Lightning node to another Lightning node goes
/// through the following states in the state machine:
/// - Announced for addition by the originating node through the update_add_htlc message.
/// - Added to the commitment transaction of the receiving node and originating node in turn
///   through the exchange of commitment_signed and revoke_and_ack messages.
/// - Announced for resolution (fulfillment or failure) by the receiving node through either one of
///   the update_fulfill_htlc, update_fail_htlc, and update_fail_malformed_htlc messages.
/// - Removed from the commitment transaction of the originating node and receiving node in turn
///   through the exchange of commitment_signed and revoke_and_ack messages.
///
/// This can be used to inspect what next message an HTLC is waiting for to advance its state.
#[derive(Clone, Debug, PartialEq)]
pub enum OutboundHTLCStateDetails {
	/// We are awaiting the appropriate revoke_and_ack's from the remote before the HTLC is added
	/// on the remote's commitment transaction after update_add_htlc.
	AwaitingRemoteRevokeToAdd,
	/// The HTLC has been added to the remote's commitment transaction by sending commitment_signed
	/// and receiving revoke_and_ack in return.
	///
	/// The HTLC will remain in this state until the remote node resolves the HTLC, or until we
	/// unilaterally close the channel due to a timeout with an uncooperative remote node.
	Committed,
	/// The HTLC has been fulfilled successfully by the remote with a preimage in update_fulfill_htlc,
	/// and we removed the HTLC from our commitment transaction by receiving commitment_signed and
	/// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
	/// for the removal from its commitment transaction.
	AwaitingRemoteRevokeToRemoveSuccess,
	/// The HTLC has been failed by the remote with update_fail_htlc or update_fail_malformed_htlc,
	/// and we removed the HTLC from our commitment transaction by receiving commitment_signed and
	/// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
	/// for the removal from its commitment transaction.
	AwaitingRemoteRevokeToRemoveFailure,
}
impl From<&OutboundHTLCState> for OutboundHTLCStateDetails {
	fn from(state: &OutboundHTLCState) -> OutboundHTLCStateDetails {
		match state {
			OutboundHTLCState::LocalAnnounced(_) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToAdd,
			OutboundHTLCState::Committed =>
				OutboundHTLCStateDetails::Committed,
			// RemoteRemoved states are ignored as the state is transient and the remote has not committed to
			// the removal yet.
			OutboundHTLCState::RemoteRemoved(_) =>
				OutboundHTLCStateDetails::Committed,
			OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess,
			OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Failure(_)) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFailure,
			OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess,
			OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Failure(_)) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFailure,
		}
	}
}
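
// Illustrative sketch: per the comment above, an HTLC in the transient
// `RemoteRemoved` state is still reported as `Committed` to observers.
#[cfg(test)]
mod outbound_htlc_state_details_example {
	use super::*;

	#[test]
	fn remote_removed_reports_committed() {
		let state = OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(None));
		assert_eq!(OutboundHTLCStateDetails::from(&state),
			OutboundHTLCStateDetails::Committed);
	}
}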
impl_writeable_tlv_based_enum_upgradable!(OutboundHTLCStateDetails,
	(0, AwaitingRemoteRevokeToAdd) => {},
	(2, Committed) => {},
	(4, AwaitingRemoteRevokeToRemoveSuccess) => {},
	(6, AwaitingRemoteRevokeToRemoveFailure) => {};
);
#[derive(Clone)]
#[cfg_attr(test, derive(Debug, PartialEq))]
enum OutboundHTLCOutcome {
	/// LDK version 0.0.105+ will always fill in the preimage here.
	Success(Option<PaymentPreimage>),
	Failure(HTLCFailReason),
}
impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
	fn from(o: Option<HTLCFailReason>) -> Self {
		match o {
			None => OutboundHTLCOutcome::Success(None),
			Some(r) => OutboundHTLCOutcome::Failure(r),
		}
	}
}
impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
	fn into(self) -> Option<&'a HTLCFailReason> {
		match self {
			OutboundHTLCOutcome::Success(_) => None,
			OutboundHTLCOutcome::Failure(ref r) => Some(r),
		}
	}
}
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
struct OutboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: OutboundHTLCState,
	source: HTLCSource,
	blinding_point: Option<PublicKey>,
	skimmed_fee_msat: Option<u64>,
}
/// Exposes details around pending outbound HTLCs.
#[derive(Clone, Debug, PartialEq)]
pub struct OutboundHTLCDetails {
	/// The HTLC ID.
	/// The IDs are incremented by 1 starting from 0 for each offered HTLC.
	/// They are unique per channel and inbound/outbound direction, unless an HTLC was only announced
	/// and not part of any commitment transaction.
	///
	/// Not present when we are awaiting a remote revocation and the HTLC is not added yet.
	pub htlc_id: Option<u64>,
	/// The amount in msat.
	pub amount_msat: u64,
	/// The block height at which this HTLC expires.
	pub cltv_expiry: u32,
	/// The payment hash.
	pub payment_hash: PaymentHash,
	/// The state of the HTLC in the state machine.
	///
	/// Determines on which commitment transactions the HTLC is included and what message the HTLC is
	/// waiting for to advance to the next state.
	///
	/// See [`OutboundHTLCStateDetails`] for information on the specific states.
	///
	/// LDK will always fill this field in, but when downgrading to prior versions of LDK, new
	/// states may result in `None` here.
	pub state: Option<OutboundHTLCStateDetails>,
	/// The extra fee being skimmed off the top of this HTLC.
	pub skimmed_fee_msat: Option<u64>,
	/// Whether the HTLC has an output below the local dust limit. If so, the output will be trimmed
	/// from the local commitment transaction and added to the commitment transaction fee.
	/// For non-anchor channels, this takes into account the cost of the second-stage HTLC
	/// transactions as well.
	///
	/// When the local commitment transaction is broadcasted as part of a unilateral closure,
	/// the value of this HTLC will therefore not be claimable but instead burned as a transaction
	/// fee.
	///
	/// Note that dust limits are specific to each party. An HTLC can be dust for the local
	/// commitment transaction but not for the counterparty's commitment transaction and vice versa.
	pub is_dust: bool,
}
impl_writeable_tlv_based!(OutboundHTLCDetails, {
	(0, htlc_id, required),
	(2, amount_msat, required),
	(4, cltv_expiry, required),
	(6, payment_hash, required),
	(7, state, upgradable_option),
	(8, skimmed_fee_msat, required),
	(10, is_dust, required),
});
/// See AwaitingRemoteRevoke ChannelState for more info
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum HTLCUpdateAwaitingACK {
	AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
		// always outbound
		amount_msat: u64,
		cltv_expiry: u32,
		payment_hash: PaymentHash,
		source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket,
		// The extra fee we're skimming off the top of this HTLC.
		skimmed_fee_msat: Option<u64>,
		blinding_point: Option<PublicKey>,
	},
	ClaimHTLC {
		payment_preimage: PaymentPreimage,
		htlc_id: u64,
	},
	FailHTLC {
		htlc_id: u64,
		err_packet: msgs::OnionErrorPacket,
	},
	FailMalformedHTLC {
		htlc_id: u64,
		failure_code: u16,
		sha256_of_onion: [u8; 32],
	},
}
macro_rules! define_state_flags {
	($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr, $get: ident, $set: ident, $clear: ident)),+], $extra_flags: expr) => {
		#[doc = $flag_type_doc]
		#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
		struct $flag_type(u32);

		impl $flag_type {
			$(
				#[doc = $flag_doc]
				const $flag: $flag_type = $flag_type($value);
			)*

			/// All flags that apply to the specified [`ChannelState`] variant.
			const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags);

			fn new() -> Self { Self(0) }

			fn from_u32(flags: u32) -> Result<Self, ()> {
				if flags & !Self::ALL.0 != 0 {
					Err(())
				} else {
					Ok($flag_type(flags))
				}
			}

			fn is_empty(&self) -> bool { self.0 == 0 }
			fn is_set(&self, flag: Self) -> bool { *self & flag == flag }
			fn set(&mut self, flag: Self) { *self |= flag }
			fn clear(&mut self, flag: Self) -> Self { self.0 &= !flag.0; *self }
		}

		$(
			define_state_flags!($flag_type, Self::$flag, $get, $set, $clear);
		)*

		impl core::ops::BitOr for $flag_type {
			type Output = Self;
			fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
		}
		impl core::ops::BitOrAssign for $flag_type {
			fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; }
		}
		impl core::ops::BitAnd for $flag_type {
			type Output = Self;
			fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) }
		}
		impl core::ops::BitAndAssign for $flag_type {
			fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; }
		}
	};
	($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
	};
	($flag_type: ident, $flag: expr, $get: ident, $set: ident, $clear: ident) => {
		impl $flag_type {
			fn $get(&self) -> bool { self.is_set($flag_type::new() | $flag) }
			fn $set(&mut self) { self.set($flag_type::new() | $flag) }
			fn $clear(&mut self) -> Self { self.clear($flag_type::new() | $flag) }
		}
	};
	($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);

		define_state_flags!($flag_type, FundedStateFlags::PEER_DISCONNECTED,
			is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected);
		define_state_flags!($flag_type, FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS,
			is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress);
		define_state_flags!($flag_type, FundedStateFlags::REMOTE_SHUTDOWN_SENT,
			is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent);
		define_state_flags!($flag_type, FundedStateFlags::LOCAL_SHUTDOWN_SENT,
			is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent);

		impl core::ops::BitOr<FundedStateFlags> for $flag_type {
			type Output = Self;
			fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
		}
		impl core::ops::BitOrAssign<FundedStateFlags> for $flag_type {
			fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; }
		}
		impl core::ops::BitAnd<FundedStateFlags> for $flag_type {
			type Output = Self;
			fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) }
		}
		impl core::ops::BitAndAssign<FundedStateFlags> for $flag_type {
			fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; }
		}
		impl PartialEq<FundedStateFlags> for $flag_type {
			fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 }
		}
		impl From<FundedStateFlags> for $flag_type {
			fn from(flags: FundedStateFlags) -> Self { Self(flags.0) }
		}
	};
}
/// We declare all the states/flags here together to help determine which bits are still available
/// to choose.
mod state_flags {
	pub const OUR_INIT_SENT: u32 = 1 << 0;
	pub const THEIR_INIT_SENT: u32 = 1 << 1;
	pub const FUNDING_NEGOTIATED: u32 = 1 << 2;
	pub const AWAITING_CHANNEL_READY: u32 = 1 << 3;
	pub const THEIR_CHANNEL_READY: u32 = 1 << 4;
	pub const OUR_CHANNEL_READY: u32 = 1 << 5;
	pub const CHANNEL_READY: u32 = 1 << 6;
	pub const PEER_DISCONNECTED: u32 = 1 << 7;
	pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8;
	pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9;
	pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10;
	pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11;
	pub const SHUTDOWN_COMPLETE: u32 = 1 << 12;
	pub const WAITING_FOR_BATCH: u32 = 1 << 13;
}
625 "Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
627 ("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
628 until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED,
629 is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected),
630 ("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
631 somewhere and we should pause sending any outbound messages until they've managed to \
632 complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS,
633 is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress),
634 ("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
635 any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
636 message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT,
637 is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent),
638 ("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
639 the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT,
640 is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent)
645 "Flags that only apply to [`ChannelState::NegotiatingFunding`].",
646 NegotiatingFundingFlags, [
647 ("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
648 OUR_INIT_SENT, state_flags::OUR_INIT_SENT, is_our_init_sent, set_our_init_sent, clear_our_init_sent),
649 ("Indicates we have received their `open_channel`/`accept_channel` message.",
650 THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT, is_their_init_sent, set_their_init_sent, clear_their_init_sent)
655 "Flags that only apply to [`ChannelState::AwaitingChannelReady`].",
656 FUNDED_STATE, AwaitingChannelReadyFlags, [
657 ("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
658 `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
659 THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY,
660 is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready),
661 ("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
662 `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
663 OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY,
664 is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready),
665 ("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
666 is being held until all channels in the batch have received `funding_signed` and have \
667 their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH,
668 is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch)
673 "Flags that only apply to [`ChannelState::ChannelReady`].",
674 FUNDED_STATE, ChannelReadyFlags, [
675 ("Indicates that we have sent a `commitment_signed` but are awaiting the responding \
676 `revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
677 messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
678 implicit ACK, so instead we have to hold them away temporarily to be sent later.",
679 AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE,
680 is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke)
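
// Illustrative sketch: how the generated flag types compose. The setters and
// getters below are produced by `define_state_flags!`, and funded-state flags
// share bit positions across the funded `ChannelState` variants.
#[cfg(test)]
mod state_flags_example {
	use super::*;

	#[test]
	fn flag_composition() {
		let mut flags = AwaitingChannelReadyFlags::new();
		assert!(flags.is_empty());
		flags.set_their_channel_ready();
		assert!(flags.is_their_channel_ready());
		// `FundedStateFlags` can be OR'd directly into any funded-state flag type.
		flags |= FundedStateFlags::PEER_DISCONNECTED;
		assert!(flags.is_peer_disconnected());
		let _ = flags.clear_peer_disconnected();
		assert!(!flags.is_peer_disconnected());
		assert!(flags.is_their_channel_ready());
	}
}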
// Note that the order of this enum is implicitly defined by where each variant is placed. Take this
// into account when introducing new states and update `test_channel_state_order` accordingly.
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
enum ChannelState {
	/// We are negotiating the parameters required for the channel prior to funding it.
	NegotiatingFunding(NegotiatingFundingFlags),
	/// We have sent `funding_created` and are awaiting a `funding_signed` to advance to
	/// `AwaitingChannelReady`. Note that this state is never used for an inbound channel, as we
	/// immediately generate `funding_signed` upon receipt of `funding_created` and thus skip it.
	FundingNegotiated,
	/// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the
	/// funding transaction to confirm.
	AwaitingChannelReady(AwaitingChannelReadyFlags),
	/// Both we and our counterparty consider the funding transaction confirmed and the channel is
	/// now operational.
	ChannelReady(ChannelReadyFlags),
	/// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager`
	/// is about to drop us, but we store this anyway.
	ShutdownComplete,
}
macro_rules! impl_state_flag {
	($get: ident, $set: ident, $clear: ident, [$($state: ident),+]) => {
		fn $get(&self) -> bool {
			match self {
				$(ChannelState::$state(flags) => flags.$get(),)*
				_ => false,
			}
		}
		fn $set(&mut self) {
			match self {
				$(ChannelState::$state(flags) => flags.$set(),)*
				_ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
			}
		}
		fn $clear(&mut self) {
			match self {
				$(ChannelState::$state(flags) => { let _ = flags.$clear(); },)*
				_ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
			}
		}
	};
	($get: ident, $set: ident, $clear: ident, FUNDED_STATES) => {
		impl_state_flag!($get, $set, $clear, [AwaitingChannelReady, ChannelReady]);
	};
	($get: ident, $set: ident, $clear: ident, $state: ident) => {
		impl_state_flag!($get, $set, $clear, [$state]);
	};
}
impl ChannelState {
	fn from_u32(state: u32) -> Result<Self, ()> {
		match state {
			state_flags::FUNDING_NEGOTIATED => Ok(ChannelState::FundingNegotiated),
			state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete),
			val => {
				if val & state_flags::AWAITING_CHANNEL_READY == state_flags::AWAITING_CHANNEL_READY {
					AwaitingChannelReadyFlags::from_u32(val & !state_flags::AWAITING_CHANNEL_READY)
						.map(|flags| ChannelState::AwaitingChannelReady(flags))
				} else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY {
					ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY)
						.map(|flags| ChannelState::ChannelReady(flags))
				} else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) {
					Ok(ChannelState::NegotiatingFunding(flags))
				} else {
					Err(())
				}
			},
		}
	}

	fn to_u32(&self) -> u32 {
		match self {
			ChannelState::NegotiatingFunding(flags) => flags.0,
			ChannelState::FundingNegotiated => state_flags::FUNDING_NEGOTIATED,
			ChannelState::AwaitingChannelReady(flags) => state_flags::AWAITING_CHANNEL_READY | flags.0,
			ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0,
			ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE,
		}
	}

	fn is_pre_funded_state(&self) -> bool {
		matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingNegotiated)
	}

	fn is_both_sides_shutdown(&self) -> bool {
		self.is_local_shutdown_sent() && self.is_remote_shutdown_sent()
	}

	fn with_funded_state_flags_mask(&self) -> FundedStateFlags {
		match self {
			ChannelState::AwaitingChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			_ => FundedStateFlags::new(),
		}
	}

	fn can_generate_new_commitment(&self) -> bool {
		match self {
			ChannelState::ChannelReady(flags) =>
				!flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) &&
					!flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) &&
					!flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
			_ => {
				debug_assert!(false, "Can only generate new commitment within ChannelReady");
				false
			},
		}
	}

	impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected, FUNDED_STATES);
	impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress, FUNDED_STATES);
	impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent, FUNDED_STATES);
	impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent, FUNDED_STATES);
	impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready, AwaitingChannelReady);
	impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready, AwaitingChannelReady);
	impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch, AwaitingChannelReady);
	impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke, ChannelReady);
}
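
// Illustrative sketch: the `u32` round-trip used when (de)serializing the
// channel state, plus the `can_generate_new_commitment` helper above.
#[cfg(test)]
mod channel_state_example {
	use super::*;

	#[test]
	fn u32_roundtrip_and_commitment_gating() {
		let state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY);
		let encoded = state.to_u32();
		assert_eq!(encoded, state_flags::AWAITING_CHANNEL_READY | state_flags::THEIR_CHANNEL_READY);
		assert_eq!(ChannelState::from_u32(encoded), Ok(state));

		// New commitments may only be generated in `ChannelReady` with no
		// outstanding revoke_and_ack, monitor update, or disconnection.
		assert!(ChannelState::ChannelReady(ChannelReadyFlags::new()).can_generate_new_commitment());
		let blocked = ChannelState::ChannelReady(ChannelReadyFlags::AWAITING_REMOTE_REVOKE);
		assert!(!blocked.can_generate_new_commitment());
	}
}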
pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

pub const DEFAULT_MAX_HTLCS: u16 = 50;

pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
	const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
	const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
	if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
}

#[cfg(not(test))]
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
#[cfg(test)]
pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;

pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
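
// Illustrative sketch: how these weight constants feed the usual
// fee = feerate_per_kw * weight / 1000 calculation for a commitment
// transaction. The feerate below is a hypothetical example value.
#[cfg(test)]
mod commitment_weight_example {
	use super::*;

	#[test]
	fn commitment_fee_for_two_htlcs() {
		let features = ChannelTypeFeatures::only_static_remote_key();
		let weight = commitment_tx_base_weight(&features) + 2 * COMMITMENT_TX_WEIGHT_PER_HTLC;
		assert_eq!(weight, 724 + 2 * 172);
		let feerate_per_kw: u64 = 1000; // sats per 1000 weight units
		let fee_sat = feerate_per_kw * weight / 1000;
		assert_eq!(fee_sat, 1_068);
	}
}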
/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;

/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
/// It's 2^24 - 1.
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;

/// Total bitcoin supply in satoshis.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;
/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;

/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;

/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;

// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
pub(super) enum ChannelError {
	Ignore(String),
	Warn(String),
	Close(String),
}

impl fmt::Debug for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
			&ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
			&ChannelError::Close(ref e) => write!(f, "Close : {}", e),
		}
	}
}

impl fmt::Display for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "{}", e),
			&ChannelError::Warn(ref e) => write!(f, "{}", e),
			&ChannelError::Close(ref e) => write!(f, "{}", e),
		}
	}
}
pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
	pub logger: &'a L,
	pub peer_id: Option<PublicKey>,
	pub channel_id: Option<ChannelId>,
}

impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
	fn log(&self, mut record: Record) {
		record.peer_id = self.peer_id;
		record.channel_id = self.channel_id;
		self.logger.log(record)
	}
}

impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
where L::Target: Logger {
	pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
	where S::Target: SignerProvider
	{
		WithChannelContext {
			logger,
			peer_id: Some(context.counterparty_node_id),
			channel_id: Some(context.channel_id),
		}
	}
}
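
// Illustrative usage sketch (hypothetical surrounding code): wrap a `Logger` so
// that every record it emits is tagged with this channel's counterparty node id
// and channel id, e.g.:
//
//   let logger = WithChannelContext::from(&self.logger, &channel.context);
//   log_debug!(logger, "Channel is now ready");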
macro_rules! secp_check {
	($res: expr, $err: expr) => {
		match $res {
			Ok(thing) => thing,
			Err(_) => return Err(ChannelError::Close($err)),
		}
	};
}
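
// Illustrative usage sketch (hypothetical arguments): unwrap a secp256k1 result
// or convert the failure into a channel-closing error, e.g.:
//
//   secp_check!(self.secp_ctx.verify_ecdsa(&sighash, &their_sig, &their_key),
//       "Invalid commitment tx signature from peer".to_owned());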
/// The "channel disabled" bit in channel_update must be set based on whether we are connected to
/// our counterparty or not. However, we don't want to announce updates right away to avoid
/// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
/// our channel_update message and track the current state here.
/// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
#[derive(Clone, Copy, PartialEq)]
pub(super) enum ChannelUpdateStatus {
	/// We've announced the channel as enabled and are connected to our peer.
	Enabled,
	/// Our channel is no longer live, but we haven't announced the channel as disabled yet.
	DisabledStaged(u8),
	/// Our channel is live again, but we haven't announced the channel as enabled yet.
	EnabledStaged(u8),
	/// We've announced the channel as disabled.
	Disabled,
}

/// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
pub enum AnnouncementSigsState {
	/// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
	/// we sent the last `AnnouncementSignatures`.
	NotSent,
	/// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
	/// This state never appears on disk - instead we write `NotSent`.
	MessageSent,
	/// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
	/// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
	/// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
	/// they send back a `RevokeAndACK`.
	/// This state never appears on disk - instead we write `NotSent`.
	Committed,
	/// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
	/// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
	PeerReceived,
}
/// An enum indicating whether the local or remote side offered a given HTLC.
enum HTLCInitiator {
	LocalOffered,
	RemoteOffered,
}

/// A struct gathering stats on pending HTLCs, either inbound or outbound side.
struct HTLCStats {
	pending_htlcs: u32,
	pending_htlcs_value_msat: u64,
	on_counterparty_tx_dust_exposure_msat: u64,
	on_holder_tx_dust_exposure_msat: u64,
	holding_cell_msat: u64,
	on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
}
/// A struct gathering stats on a commitment transaction, either local or remote.
struct CommitmentStats<'a> {
	tx: CommitmentTransaction, // the transaction info
	feerate_per_kw: u32, // the feerate included to build the transaction
	total_fee_sat: u64, // the total fee included in the transaction
	num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
	htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
	local_balance_msat: u64, // local balance before fees *not* considering dust limits
	remote_balance_msat: u64, // remote balance before fees *not* considering dust limits
	outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
	inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
}
/// Used when calculating whether we or the remote can afford an additional HTLC.
struct HTLCCandidate {
	amount_msat: u64,
	origin: HTLCInitiator,
}

impl HTLCCandidate {
	fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
		Self {
			amount_msat,
			origin,
		}
	}
}
/// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
/// description.
enum UpdateFulfillFetch {
	NewClaim {
		monitor_update: ChannelMonitorUpdate,
		htlc_value_msat: u64,
		msg: Option<msgs::UpdateFulfillHTLC>,
	},
	DuplicateClaim {},
}

/// The return type of get_update_fulfill_htlc_and_commit.
pub enum UpdateFulfillCommitFetch {
	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
	/// previously placed in the holding cell (and has since been removed).
	NewClaim {
		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
		monitor_update: ChannelMonitorUpdate,
		/// The value of the HTLC which was claimed, in msat.
		htlc_value_msat: u64,
	},
	/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
	/// or has been forgotten (presumably previously claimed).
	DuplicateClaim {},
}
/// The return value of `monitor_updating_restored`
pub(super) struct MonitorRestoreUpdates {
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
	pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	pub finalized_claimed_htlcs: Vec<HTLCSource>,
	pub funding_broadcastable: Option<Transaction>,
	pub channel_ready: Option<msgs::ChannelReady>,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
}
/// The return value of `signer_maybe_unblocked`
pub(super) struct SignerResumeUpdates {
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub funding_signed: Option<msgs::FundingSigned>,
	pub channel_ready: Option<msgs::ChannelReady>,
}
/// The return value of `channel_reestablish`
pub(super) struct ReestablishResponses {
	pub channel_ready: Option<msgs::ChannelReady>,
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
	pub shutdown_msg: Option<msgs::Shutdown>,
}
/// The result of a shutdown that should be handled.
pub(crate) struct ShutdownResult {
	pub(crate) closure_reason: ClosureReason,
	/// A channel monitor update to apply.
	pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelId, ChannelMonitorUpdate)>,
	/// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
	pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	/// An unbroadcasted batch funding transaction id. The closure of this channel should be
	/// propagated to the remainder of the batch.
	pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
	pub(crate) channel_id: ChannelId,
	pub(crate) user_channel_id: u128,
	pub(crate) channel_capacity_satoshis: u64,
	pub(crate) counterparty_node_id: PublicKey,
	pub(crate) unbroadcasted_funding_tx: Option<Transaction>,
	pub(crate) channel_funding_txo: Option<OutPoint>,
}
/// If the majority of the channel's funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
/// balance left to pay it, while the fundee is unable to send a payment as the increase in fee
/// more than drains their reserve value. Thus, neither side can send a new HTLC and the channel
/// becomes useless.
/// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
/// by this multiple without hitting this case.
/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
/// HTLCs for days we may need this to suffice for feerate increases across days, but that may
/// leave the channel less usable as we hold a bigger reserve.
#[cfg(any(fuzzing, test))]
pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
#[cfg(not(any(fuzzing, test)))]
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
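
// Illustrative sketch (hypothetical numbers): the buffer means that, as the
// initiator, we require affordability at double the current feerate before
// adding a new outbound HTLC.
#[cfg(test)]
mod fee_spike_buffer_example {
	use super::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;

	#[test]
	fn buffered_fee_doubles_current_feerate() {
		let feerate_per_kw: u64 = 2_500;
		let commit_tx_weight: u64 = 724; // non-anchor base weight, no HTLCs
		let fee_sat = feerate_per_kw * commit_tx_weight / 1000;
		let buffered_fee_sat =
			FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * feerate_per_kw * commit_tx_weight / 1000;
		assert_eq!(fee_sat, 1_810);
		assert_eq!(buffered_fee_sat, 3_620);
	}
}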
/// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
/// channel creation on an inbound channel, we simply force-close and move on.
/// This constant is the one suggested in BOLT 2.
pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;

/// In case of a concurrent update_add_htlc proposed by our counterparty, we might
/// not have enough balance value remaining to cover the onchain cost of this new
/// HTLC weight. If this happens, our counterparty fails the reception of our
/// commitment_signed including this new HTLC due to infringement on the channel
/// reserve.
/// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
/// size 2. However, if the number of concurrent update_add_htlc is higher, this still
/// leads to a channel force-close. Ultimately, this is an issue coming from the
/// design of LN state machines, allowing asynchronous updates.
pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;

/// When a channel is opened, we check that the funding amount is enough to pay for relevant
/// commitment transaction fees, with at least this many HTLCs present on the commitment
/// transaction (not counting the value of the HTLCs themselves).
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
/// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
/// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
/// ChannelUpdate prompted by the config update. This value was determined as follows:
///
/// * The expected interval between ticks (1 minute).
/// * The average convergence delay of updates across the network, i.e., ~300 seconds on average
///   for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
/// * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval (300 s / 60 s = 5)
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
/// The number of ticks that may elapse while we're waiting for a response to a
/// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
/// them.
///
/// See [`ChannelContext::sent_message_awaiting_response`] for more information.
pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;

/// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
/// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
/// exceeding this age limit will be force-closed and purged from memory.
pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;

/// Number of blocks needed for an output from a coinbase transaction to be spendable.
pub(crate) const COINBASE_MATURITY: u32 = 100;
struct PendingChannelMonitorUpdate {
	update: ChannelMonitorUpdate,
}

impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
	(0, update, required),
});
/// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
/// its variants containing an appropriate channel struct.
pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
	UnfundedOutboundV1(OutboundV1Channel<SP>),
	UnfundedInboundV1(InboundV1Channel<SP>),
	#[cfg(dual_funding)]
	UnfundedOutboundV2(OutboundV2Channel<SP>),
	#[cfg(dual_funding)]
	UnfundedInboundV2(InboundV2Channel<SP>),
	Funded(Channel<SP>),
}

impl<'a, SP: Deref> ChannelPhase<SP> where
	SP::Target: SignerProvider,
	<SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
{
	pub fn context(&'a self) -> &'a ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(chan) => &chan.context,
			ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
			ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
			#[cfg(dual_funding)]
			ChannelPhase::UnfundedOutboundV2(chan) => &chan.context,
			#[cfg(dual_funding)]
			ChannelPhase::UnfundedInboundV2(chan) => &chan.context,
		}
	}

	pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
			#[cfg(dual_funding)]
			ChannelPhase::UnfundedOutboundV2(ref mut chan) => &mut chan.context,
			#[cfg(dual_funding)]
			ChannelPhase::UnfundedInboundV2(ref mut chan) => &mut chan.context,
		}
	}
}
/// Contains all state common to unfunded inbound/outbound channels.
pub(super) struct UnfundedChannelContext {
	/// A counter tracking how many ticks have elapsed since this unfunded channel was created. If
	/// the peer has yet to respond once this reaches `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, the
	/// channel will be force-closed and purged from memory.
	///
	/// This is so that we don't keep channels around that haven't progressed to a funded state
	/// in a timely manner.
	unfunded_channel_age_ticks: usize,
}

impl UnfundedChannelContext {
	/// Determines whether we should force-close and purge this unfunded channel from memory due to it
	/// having reached the unfunded channel age limit.
	///
	/// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
	pub fn should_expire_unfunded_channel(&mut self) -> bool {
		self.unfunded_channel_age_ticks += 1;
		self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
	}
}
1222 /// Contains everything about the channel including state, and various flags.
1223 pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
1224 config: LegacyChannelConfig,
1226 // Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
1227 // constructed using it. The second element in the tuple corresponds to the number of ticks that
1228 // have elapsed since the update occurred.
1229 prev_config: Option<(ChannelConfig, usize)>,
1231 inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,
1235 /// The current channel ID.
1236 channel_id: ChannelId,
1237 /// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
1238 /// Will be `None` for channels created prior to 0.0.115.
1239 temporary_channel_id: Option<ChannelId>,
1240 channel_state: ChannelState,
1242 // When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
1243 // our peer. However, we want to make sure they received it, or else rebroadcast it when we
1245 // We do so here, see `AnnouncementSigsSent` for more details on the state(s).
1246 // Note that a number of our tests were written prior to the behavior here which retransmits
1247 // AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
1249 #[cfg(any(test, feature = "_test_utils"))]
1250 pub(crate) announcement_sigs_state: AnnouncementSigsState,
1251 #[cfg(not(any(test, feature = "_test_utils")))]
1252 announcement_sigs_state: AnnouncementSigsState,
1254 secp_ctx: Secp256k1<secp256k1::All>,
1255 channel_value_satoshis: u64,
1257 latest_monitor_update_id: u64,
1259 holder_signer: ChannelSignerType<SP>,
1260 shutdown_scriptpubkey: Option<ShutdownScript>,
1261 destination_script: ScriptBuf,
1263 // Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
1264 // generation start at 0 and count up...this simplifies some parts of implementation at the
1265 // cost of others, but should really just be changed.
1267 cur_holder_commitment_transaction_number: u64,
1268 cur_counterparty_commitment_transaction_number: u64,
1269 value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
1270 pending_inbound_htlcs: Vec<InboundHTLCOutput>,
1271 pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
1272 holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
1274 /// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
1275 /// need to ensure we resend them in the order we originally generated them. Note that because
1276 /// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
1277 /// sufficient to simply set this to the opposite of any message we are generating as we
1278 /// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
	/// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
	/// resend it first.
1281 resend_order: RAACommitmentOrder,
1283 monitor_pending_channel_ready: bool,
1284 monitor_pending_revoke_and_ack: bool,
1285 monitor_pending_commitment_signed: bool,
1287 // TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
1288 // responsible for some of the HTLCs here or not - we don't know whether the update in question
1289 // completed or not. We currently ignore these fields entirely when force-closing a channel,
1290 // but need to handle this somehow or we run the risk of losing HTLCs!
1291 monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
1292 monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
1293 monitor_pending_finalized_fulfills: Vec<HTLCSource>,
1295 /// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
1296 /// but our signer (initially) refused to give us a signature, we should retry at some point in
1297 /// the future when the signer indicates it may have a signature for us.
1299 /// This flag is set in such a case. Note that we don't need to persist this as we'll end up
1300 /// setting it again as a side-effect of [`Channel::channel_reestablish`].
1301 signer_pending_commitment_update: bool,
1302 /// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
1303 /// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
1304 /// outbound or inbound.
1305 signer_pending_funding: bool,
1307 // pending_update_fee is filled when sending and receiving update_fee.
1309 // Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
1310 // or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
1311 // generating new commitment transactions with exactly the same criteria as inbound/outbound
1312 // HTLCs with similar state.
1313 pending_update_fee: Option<(u32, FeeUpdateState)>,
1314 // If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
1315 // it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
1316 // `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
1317 // `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
1318 // further `send_update_fee` calls, dropping the previous holding cell update entirely.
1319 holding_cell_update_fee: Option<u32>,
1320 next_holder_htlc_id: u64,
1321 next_counterparty_htlc_id: u64,
1322 feerate_per_kw: u32,
1324 /// The timestamp set on our latest `channel_update` message for this channel. It is updated
1325 /// when the channel is updated in ways which may impact the `channel_update` message or when a
	/// new block is received, ensuring it's always at least moderately close to the current real
	/// time.
1328 update_time_counter: u32,
1330 #[cfg(debug_assertions)]
1331 /// Max to_local and to_remote outputs in a locally-generated commitment transaction
1332 holder_max_commitment_tx_output: Mutex<(u64, u64)>,
1333 #[cfg(debug_assertions)]
1334 /// Max to_local and to_remote outputs in a remote-generated commitment transaction
1335 counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,
1337 last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
1338 target_closing_feerate_sats_per_kw: Option<u32>,
1340 /// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
1341 /// update, we need to delay processing it until later. We do that here by simply storing the
1342 /// closing_signed message and handling it in `maybe_propose_closing_signed`.
1343 pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,
1345 /// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
1346 /// transaction. These are set once we reach `closing_negotiation_ready`.
	#[cfg(test)]
	pub(crate) closing_fee_limits: Option<(u64, u64)>,
	#[cfg(not(test))]
	closing_fee_limits: Option<(u64, u64)>,
1352 /// If we remove an HTLC (or fee update), commit, and receive our counterparty's
1353 /// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
1354 /// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
1355 /// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
1356 /// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
1358 /// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
1359 /// until we see a `commitment_signed` before doing so.
1361 /// We don't bother to persist this - we anticipate this state won't last longer than a few
1362 /// milliseconds, so any accidental force-closes here should be exceedingly rare.
1363 expecting_peer_commitment_signed: bool,
1365 /// The hash of the block in which the funding transaction was included.
1366 funding_tx_confirmed_in: Option<BlockHash>,
1367 funding_tx_confirmation_height: u32,
1368 short_channel_id: Option<u64>,
1369 /// Either the height at which this channel was created or the height at which it was last
1370 /// serialized if it was serialized by versions prior to 0.0.103.
1371 /// We use this to close if funding is never broadcasted.
1372 channel_creation_height: u32,
1374 counterparty_dust_limit_satoshis: u64,
	#[cfg(test)]
	pub(super) holder_dust_limit_satoshis: u64,
	#[cfg(not(test))]
	holder_dust_limit_satoshis: u64,
	#[cfg(test)]
	pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	counterparty_max_htlc_value_in_flight_msat: u64,
	#[cfg(test)]
	pub(super) holder_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	holder_max_htlc_value_in_flight_msat: u64,
	/// The minimum channel reserve we are required to maintain, as set by our counterparty.
1392 counterparty_selected_channel_reserve_satoshis: Option<u64>,
	#[cfg(test)]
	pub(super) holder_selected_channel_reserve_satoshis: u64,
	#[cfg(not(test))]
	holder_selected_channel_reserve_satoshis: u64,
1399 counterparty_htlc_minimum_msat: u64,
1400 holder_htlc_minimum_msat: u64,
	#[cfg(test)]
	pub counterparty_max_accepted_htlcs: u16,
	#[cfg(not(test))]
	counterparty_max_accepted_htlcs: u16,
1405 holder_max_accepted_htlcs: u16,
1406 minimum_depth: Option<u32>,
1408 counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
1410 pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
1411 funding_transaction: Option<Transaction>,
1412 is_batch_funding: Option<()>,
1414 counterparty_cur_commitment_point: Option<PublicKey>,
1415 counterparty_prev_commitment_point: Option<PublicKey>,
1416 counterparty_node_id: PublicKey,
1418 counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,
1420 commitment_secrets: CounterpartyCommitmentSecrets,
1422 channel_update_status: ChannelUpdateStatus,
1423 /// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
1424 /// not complete within a single timer tick (one minute), we should force-close the channel.
	/// This prevents us from keeping unusable channels around forever if our counterparty wishes
	/// to delay just as long.
1427 /// Note that this field is reset to false on deserialization to give us a chance to connect to
1428 /// our peer and start the closing_signed negotiation fresh.
1429 closing_signed_in_flight: bool,
1431 /// Our counterparty's channel_announcement signatures provided in announcement_signatures.
1432 /// This can be used to rebroadcast the channel_announcement message later.
1433 announcement_sigs: Option<(Signature, Signature)>,
1435 // We save these values so we can make sure `next_local_commit_tx_fee_msat` and
1436 // `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
	// be, by comparing the cached values to the fee of the transaction generated by
1438 // `build_commitment_transaction`.
1439 #[cfg(any(test, fuzzing))]
1440 next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
1441 #[cfg(any(test, fuzzing))]
1442 next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
1444 /// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
1445 /// they will not send a channel_reestablish until the channel locks in. Then, they will send a
1446 /// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
1447 /// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
1448 /// message until we receive a channel_reestablish.
1450 /// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
1451 pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,
1453 /// An option set when we wish to track how many ticks have elapsed while waiting for a response
1454 /// from our counterparty after sending a message. If the peer has yet to respond after reaching
1455 /// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
1456 /// unblock the state machine.
1458 /// This behavior is mostly motivated by a lnd bug in which we don't receive a message we expect
1459 /// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
1460 /// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
1462 /// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
1463 /// [`msgs::RevokeAndACK`] message from the counterparty.
1464 sent_message_awaiting_response: Option<usize>,
1466 #[cfg(any(test, fuzzing))]
1467 // When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
1468 // corresponding HTLC on the inbound path. If, then, the outbound path channel is
	// disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
1470 // messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
1471 // is fine, but as a sanity check in our failure to generate the second claim, we check here
1472 // that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
1473 historical_inbound_htlc_fulfills: HashSet<u64>,
1475 /// This channel's type, as negotiated during channel open
1476 channel_type: ChannelTypeFeatures,
1478 // Our counterparty can offer us SCID aliases which they will map to this channel when routing
1479 // outbound payments. These can be used in invoice route hints to avoid explicitly revealing
1480 // the channel's funding UTXO.
1482 // We also use this when sending our peer a channel_update that isn't to be broadcasted
1483 // publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
1484 // associated channel mapping.
1486 // We only bother storing the most recent SCID alias at any time, though our counterparty has
1487 // to store all of them.
1488 latest_inbound_scid_alias: Option<u64>,
1490 // We always offer our counterparty a static SCID alias, which we recognize as for this channel
1491 // if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
1492 // don't currently support node id aliases and eventually privacy should be provided with
1493 // blinded paths instead of simple scid+node_id aliases.
1494 outbound_scid_alias: u64,
1496 // We track whether we already emitted a `ChannelPending` event.
1497 channel_pending_event_emitted: bool,
1499 // We track whether we already emitted a `ChannelReady` event.
1500 channel_ready_event_emitted: bool,
	/// Set to `Some(())` if we initiated the shutdown of the channel.
1503 local_initiated_shutdown: Option<()>,
1505 /// The unique identifier used to re-derive the private key material for the channel through
1506 /// [`SignerProvider::derive_channel_signer`].
	#[cfg(not(test))]
	channel_keys_id: [u8; 32],
	#[cfg(test)]
	pub channel_keys_id: [u8; 32],
1512 /// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
1513 /// store it here and only release it to the `ChannelManager` once it asks for it.
1514 blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
1517 impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
1518 fn new_for_inbound_channel<'a, ES: Deref, F: Deref, L: Deref>(
1519 fee_estimator: &'a LowerBoundedFeeEstimator<F>,
1520 entropy_source: &'a ES,
1521 signer_provider: &'a SP,
1522 counterparty_node_id: PublicKey,
1523 their_features: &'a InitFeatures,
1525 config: &'a UserConfig,
1526 current_chain_height: u32,
1529 our_funding_satoshis: u64,
1530 counterparty_pubkeys: ChannelPublicKeys,
1531 channel_type: ChannelTypeFeatures,
1532 holder_selected_channel_reserve_satoshis: u64,
1533 msg_channel_reserve_satoshis: u64,
1535 open_channel_fields: msgs::CommonOpenChannelFields,
1536 ) -> Result<ChannelContext<SP>, ChannelError>
1538 ES::Target: EntropySource,
1539 F::Target: FeeEstimator,
1541 SP::Target: SignerProvider,
1543 let logger = WithContext::from(logger, Some(counterparty_node_id), Some(open_channel_fields.temporary_channel_id));
		let announced_channel = (open_channel_fields.channel_flags & 1) == 1;
1546 let channel_value_satoshis = our_funding_satoshis.saturating_add(open_channel_fields.funding_satoshis);
1548 let channel_keys_id = signer_provider.generate_channel_keys_id(true, channel_value_satoshis, user_id);
1549 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
1550 let pubkeys = holder_signer.pubkeys().clone();
1552 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
1553 return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
1556 // Check sanity of message fields:
1557 if channel_value_satoshis > config.channel_handshake_limits.max_funding_satoshis {
1558 return Err(ChannelError::Close(format!(
1559 "Per our config, funding must be at most {}. It was {}. Peer contribution: {}. Our contribution: {}",
1560 config.channel_handshake_limits.max_funding_satoshis, channel_value_satoshis,
1561 open_channel_fields.funding_satoshis, our_funding_satoshis)));
1563 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
1564 return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", channel_value_satoshis)));
1566 if msg_channel_reserve_satoshis > channel_value_satoshis {
1567 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be no greater than channel_value_satoshis: {}", msg_channel_reserve_satoshis, channel_value_satoshis)));
1569 let full_channel_value_msat = (channel_value_satoshis - msg_channel_reserve_satoshis) * 1000;
1570 if msg_push_msat > full_channel_value_msat {
1571 return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg_push_msat, full_channel_value_msat)));
1573 if open_channel_fields.dust_limit_satoshis > channel_value_satoshis {
1574 return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than channel_value_satoshis {}. Peer never wants payout outputs?", open_channel_fields.dust_limit_satoshis, channel_value_satoshis)));
1576 if open_channel_fields.htlc_minimum_msat >= full_channel_value_msat {
1577 return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", open_channel_fields.htlc_minimum_msat, full_channel_value_msat)));
1579 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, open_channel_fields.commitment_feerate_sat_per_1000_weight, None, &&logger)?;
1581 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
1582 if open_channel_fields.to_self_delay > max_counterparty_selected_contest_delay {
1583 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, open_channel_fields.to_self_delay)));
1585 if open_channel_fields.max_accepted_htlcs < 1 {
1586 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
1588 if open_channel_fields.max_accepted_htlcs > MAX_HTLCS {
1589 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", open_channel_fields.max_accepted_htlcs, MAX_HTLCS)));
1592 // Now check against optional parameters as set by config...
1593 if channel_value_satoshis < config.channel_handshake_limits.min_funding_satoshis {
1594 return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", channel_value_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
1596 if open_channel_fields.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
1597 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", open_channel_fields.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
1599 if open_channel_fields.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
1600 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", open_channel_fields.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
1602 if msg_channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
1603 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg_channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
1605 if open_channel_fields.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
1606 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", open_channel_fields.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
1608 if open_channel_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
1609 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
1611 if open_channel_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
1612 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
1615 // Convert things into internal flags and prep our state:
1617 if config.channel_handshake_limits.force_announced_channel_preference {
1618 if config.channel_handshake_config.announced_channel != announced_channel {
1619 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
1623 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
1624 // Protocol level safety check in place, although it should never happen because
1625 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
1626 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
1628 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
1629 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg_push_msat)));
1631 if msg_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
1632 log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
1633 msg_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
1635 if holder_selected_channel_reserve_satoshis < open_channel_fields.dust_limit_satoshis {
1636 return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", open_channel_fields.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
1639 // check if the funder's amount for the initial commitment tx is sufficient
1640 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
1641 let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
1642 ANCHOR_OUTPUT_VALUE_SATOSHI * 2
1646 let funders_amount_msat = open_channel_fields.funding_satoshis * 1000 - msg_push_msat;
1647 let commitment_tx_fee = commit_tx_fee_msat(open_channel_fields.commitment_feerate_sat_per_1000_weight, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
1648 if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
1649 return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
1652 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
1653 // While it's reasonable for us to not meet the channel reserve initially (if they don't
1654 // want to push much to us), our counterparty should always have more than our reserve.
1655 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
1656 return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
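		// Worked example of the two checks above, with illustrative numbers on a non-anchor
		// channel: if the funder contributes 20_000 sats and pushes 5_000_000 msat,
		// funders_amount_msat is 15_000_000 msat; with a 1_000 sat commitment transaction fee,
		// to_remote is 15_000 - 1_000 = 14_000 sats, which must still meet the channel reserve
		// we selected for them.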
1659 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
1660 match &open_channel_fields.shutdown_scriptpubkey {
1661 &Some(ref script) => {
					// Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
1663 if script.len() == 0 {
1666 if !script::is_bolt2_compliant(&script, their_features) {
1667 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
1669 Some(script.clone())
					// Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (i.e. a 0-length script). Peer looks buggy, so we fail the channel
1674 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
1679 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
1680 match signer_provider.get_shutdown_scriptpubkey() {
1681 Ok(scriptpubkey) => Some(scriptpubkey),
1682 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
1686 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
1687 if !shutdown_scriptpubkey.is_compatible(&their_features) {
1688 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
1692 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
1693 Ok(script) => script,
1694 Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
1697 let mut secp_ctx = Secp256k1::new();
1698 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
1700 let minimum_depth = if is_0conf {
1703 Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
1706 let value_to_self_msat = our_funding_satoshis * 1000 + msg_push_msat;
1708 // TODO(dual_funding): Checks for `funding_feerate_sat_per_1000_weight`?
1710 let channel_context = ChannelContext {
1713 config: LegacyChannelConfig {
1714 options: config.channel_config.clone(),
1716 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
1721 inbound_handshake_limits_override: None,
1723 temporary_channel_id: Some(open_channel_fields.temporary_channel_id),
1724 channel_id: open_channel_fields.temporary_channel_id,
1725 channel_state: ChannelState::NegotiatingFunding(
1726 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
1728 announcement_sigs_state: AnnouncementSigsState::NotSent,
1731 latest_monitor_update_id: 0,
1733 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
1734 shutdown_scriptpubkey,
1737 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
1738 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
1741 pending_inbound_htlcs: Vec::new(),
1742 pending_outbound_htlcs: Vec::new(),
1743 holding_cell_htlc_updates: Vec::new(),
1744 pending_update_fee: None,
1745 holding_cell_update_fee: None,
1746 next_holder_htlc_id: 0,
1747 next_counterparty_htlc_id: 0,
1748 update_time_counter: 1,
1750 resend_order: RAACommitmentOrder::CommitmentFirst,
1752 monitor_pending_channel_ready: false,
1753 monitor_pending_revoke_and_ack: false,
1754 monitor_pending_commitment_signed: false,
1755 monitor_pending_forwards: Vec::new(),
1756 monitor_pending_failures: Vec::new(),
1757 monitor_pending_finalized_fulfills: Vec::new(),
1759 signer_pending_commitment_update: false,
1760 signer_pending_funding: false,
1763 #[cfg(debug_assertions)]
1764 holder_max_commitment_tx_output: Mutex::new((value_to_self_msat, (channel_value_satoshis * 1000 - msg_push_msat).saturating_sub(value_to_self_msat))),
1765 #[cfg(debug_assertions)]
1766 counterparty_max_commitment_tx_output: Mutex::new((value_to_self_msat, (channel_value_satoshis * 1000 - msg_push_msat).saturating_sub(value_to_self_msat))),
1768 last_sent_closing_fee: None,
1769 pending_counterparty_closing_signed: None,
1770 expecting_peer_commitment_signed: false,
1771 closing_fee_limits: None,
1772 target_closing_feerate_sats_per_kw: None,
1774 funding_tx_confirmed_in: None,
1775 funding_tx_confirmation_height: 0,
1776 short_channel_id: None,
1777 channel_creation_height: current_chain_height,
1779 feerate_per_kw: open_channel_fields.commitment_feerate_sat_per_1000_weight,
1780 channel_value_satoshis,
1781 counterparty_dust_limit_satoshis: open_channel_fields.dust_limit_satoshis,
1782 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
1783 counterparty_max_htlc_value_in_flight_msat: cmp::min(open_channel_fields.max_htlc_value_in_flight_msat, channel_value_satoshis * 1000),
1784 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
1785 counterparty_selected_channel_reserve_satoshis: Some(msg_channel_reserve_satoshis),
1786 holder_selected_channel_reserve_satoshis,
1787 counterparty_htlc_minimum_msat: open_channel_fields.htlc_minimum_msat,
1788 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
1789 counterparty_max_accepted_htlcs: open_channel_fields.max_accepted_htlcs,
1790 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
1793 counterparty_forwarding_info: None,
1795 channel_transaction_parameters: ChannelTransactionParameters {
1796 holder_pubkeys: pubkeys,
1797 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
1798 is_outbound_from_holder: false,
1799 counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
1800 selected_contest_delay: open_channel_fields.to_self_delay,
1801 pubkeys: counterparty_pubkeys,
1803 funding_outpoint: None,
1804 channel_type_features: channel_type.clone()
1806 funding_transaction: None,
1807 is_batch_funding: None,
1809 counterparty_cur_commitment_point: Some(open_channel_fields.first_per_commitment_point),
1810 counterparty_prev_commitment_point: None,
1811 counterparty_node_id,
1813 counterparty_shutdown_scriptpubkey,
1815 commitment_secrets: CounterpartyCommitmentSecrets::new(),
1817 channel_update_status: ChannelUpdateStatus::Enabled,
1818 closing_signed_in_flight: false,
1820 announcement_sigs: None,
1822 #[cfg(any(test, fuzzing))]
1823 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
1824 #[cfg(any(test, fuzzing))]
1825 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
1827 workaround_lnd_bug_4006: None,
1828 sent_message_awaiting_response: None,
1830 latest_inbound_scid_alias: None,
1831 outbound_scid_alias: 0,
1833 channel_pending_event_emitted: false,
1834 channel_ready_event_emitted: false,
1836 #[cfg(any(test, fuzzing))]
1837 historical_inbound_htlc_fulfills: new_hash_set(),
1842 local_initiated_shutdown: None,
1844 blocked_monitor_updates: Vec::new(),
1850 fn new_for_outbound_channel<'a, ES: Deref, F: Deref>(
1851 fee_estimator: &'a LowerBoundedFeeEstimator<F>,
1852 entropy_source: &'a ES,
1853 signer_provider: &'a SP,
1854 counterparty_node_id: PublicKey,
1855 their_features: &'a InitFeatures,
1856 funding_satoshis: u64,
1859 config: &'a UserConfig,
1860 current_chain_height: u32,
1861 outbound_scid_alias: u64,
1862 temporary_channel_id: Option<ChannelId>,
1863 holder_selected_channel_reserve_satoshis: u64,
1864 channel_keys_id: [u8; 32],
1865 holder_signer: <SP::Target as SignerProvider>::EcdsaSigner,
1866 pubkeys: ChannelPublicKeys,
1867 ) -> Result<ChannelContext<SP>, APIError>
1869 ES::Target: EntropySource,
1870 F::Target: FeeEstimator,
1871 SP::Target: SignerProvider,
1873 // This will be updated with the counterparty contribution if this is a dual-funded channel
1874 let channel_value_satoshis = funding_satoshis;
1876 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
1878 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
1879 return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
1881 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
1882 return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
1884 let channel_value_msat = channel_value_satoshis * 1000;
1885 if push_msat > channel_value_msat {
1886 return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
1888 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
1889 return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
1892 let channel_type = get_initial_channel_type(&config, their_features);
1893 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
1895 let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
1896 (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
1898 (ConfirmationTarget::NonAnchorChannelFee, 0)
1900 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
1902 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
1903 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
1904 if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
1905 return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
1908 let mut secp_ctx = Secp256k1::new();
1909 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
1911 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
1912 match signer_provider.get_shutdown_scriptpubkey() {
1913 Ok(scriptpubkey) => Some(scriptpubkey),
1914 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
1918 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
1919 if !shutdown_scriptpubkey.is_compatible(&their_features) {
1920 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
1924 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
1925 Ok(script) => script,
1926 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
1929 let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
1934 config: LegacyChannelConfig {
1935 options: config.channel_config.clone(),
1936 announced_channel: config.channel_handshake_config.announced_channel,
1937 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
1942 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
1944 channel_id: temporary_channel_id,
1945 temporary_channel_id: Some(temporary_channel_id),
1946 channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
1947 announcement_sigs_state: AnnouncementSigsState::NotSent,
1949 // We'll add our counterparty's `funding_satoshis` when we receive `accept_channel2`.
1950 channel_value_satoshis,
1952 latest_monitor_update_id: 0,
1954 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
1955 shutdown_scriptpubkey,
1958 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
1959 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
1962 pending_inbound_htlcs: Vec::new(),
1963 pending_outbound_htlcs: Vec::new(),
1964 holding_cell_htlc_updates: Vec::new(),
1965 pending_update_fee: None,
1966 holding_cell_update_fee: None,
1967 next_holder_htlc_id: 0,
1968 next_counterparty_htlc_id: 0,
1969 update_time_counter: 1,
1971 resend_order: RAACommitmentOrder::CommitmentFirst,
1973 monitor_pending_channel_ready: false,
1974 monitor_pending_revoke_and_ack: false,
1975 monitor_pending_commitment_signed: false,
1976 monitor_pending_forwards: Vec::new(),
1977 monitor_pending_failures: Vec::new(),
1978 monitor_pending_finalized_fulfills: Vec::new(),
1980 signer_pending_commitment_update: false,
1981 signer_pending_funding: false,
1983 // We'll add our counterparty's `funding_satoshis` to these max commitment output assertions
1984 // when we receive `accept_channel2`.
1985 #[cfg(debug_assertions)]
1986 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
1987 #[cfg(debug_assertions)]
1988 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
1990 last_sent_closing_fee: None,
1991 pending_counterparty_closing_signed: None,
1992 expecting_peer_commitment_signed: false,
1993 closing_fee_limits: None,
1994 target_closing_feerate_sats_per_kw: None,
1996 funding_tx_confirmed_in: None,
1997 funding_tx_confirmation_height: 0,
1998 short_channel_id: None,
1999 channel_creation_height: current_chain_height,
2001 feerate_per_kw: commitment_feerate,
2002 counterparty_dust_limit_satoshis: 0,
2003 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
2004 counterparty_max_htlc_value_in_flight_msat: 0,
2005 // We'll adjust this to include our counterparty's `funding_satoshis` when we
2006 // receive `accept_channel2`.
2007 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
2008 counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
2009 holder_selected_channel_reserve_satoshis,
2010 counterparty_htlc_minimum_msat: 0,
2011 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
2012 counterparty_max_accepted_htlcs: 0,
2013 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
2014 minimum_depth: None, // Filled in in accept_channel
2016 counterparty_forwarding_info: None,
2018 channel_transaction_parameters: ChannelTransactionParameters {
2019 holder_pubkeys: pubkeys,
2020 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
2021 is_outbound_from_holder: true,
2022 counterparty_parameters: None,
2023 funding_outpoint: None,
2024 channel_type_features: channel_type.clone()
2026 funding_transaction: None,
2027 is_batch_funding: None,
2029 counterparty_cur_commitment_point: None,
2030 counterparty_prev_commitment_point: None,
2031 counterparty_node_id,
2033 counterparty_shutdown_scriptpubkey: None,
2035 commitment_secrets: CounterpartyCommitmentSecrets::new(),
2037 channel_update_status: ChannelUpdateStatus::Enabled,
2038 closing_signed_in_flight: false,
2040 announcement_sigs: None,
2042 #[cfg(any(test, fuzzing))]
2043 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
2044 #[cfg(any(test, fuzzing))]
2045 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
2047 workaround_lnd_bug_4006: None,
2048 sent_message_awaiting_response: None,
2050 latest_inbound_scid_alias: None,
2051 outbound_scid_alias,
2053 channel_pending_event_emitted: false,
2054 channel_ready_event_emitted: false,
2056 #[cfg(any(test, fuzzing))]
2057 historical_inbound_htlc_fulfills: new_hash_set(),
2062 blocked_monitor_updates: Vec::new(),
2063 local_initiated_shutdown: None,
2067 /// Allowed in any state (including after shutdown)
2068 pub fn get_update_time_counter(&self) -> u32 {
2069 self.update_time_counter
2072 pub fn get_latest_monitor_update_id(&self) -> u64 {
2073 self.latest_monitor_update_id
2076 pub fn should_announce(&self) -> bool {
2077 self.config.announced_channel
2080 pub fn is_outbound(&self) -> bool {
2081 self.channel_transaction_parameters.is_outbound_from_holder
2084 /// Gets the fee we'd want to charge for adding an HTLC output to this Channel
2085 /// Allowed in any state (including after shutdown)
2086 pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
2087 self.config.options.forwarding_fee_base_msat
2090 /// Returns true if we've ever received a message from the remote end for this Channel
2091 pub fn have_received_message(&self) -> bool {
2092 self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
2095 /// Returns true if this channel is fully established and not known to be closing.
2096 /// Allowed in any state (including after shutdown)
2097 pub fn is_usable(&self) -> bool {
2098 matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
2099 !self.channel_state.is_local_shutdown_sent() &&
2100 !self.channel_state.is_remote_shutdown_sent() &&
2101 !self.monitor_pending_channel_ready
	/// Returns the state of the channel as it progresses through the various stages of shutdown.
2105 pub fn shutdown_state(&self) -> ChannelShutdownState {
2106 match self.channel_state {
2107 ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
2108 if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
2109 ChannelShutdownState::ShutdownInitiated
2110 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
2111 ChannelShutdownState::ResolvingHTLCs
2112 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
2113 ChannelShutdownState::NegotiatingClosingFee
2115 ChannelShutdownState::NotShuttingDown
2117 ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
2118 _ => ChannelShutdownState::NotShuttingDown,
2122 fn closing_negotiation_ready(&self) -> bool {
2123 let is_ready_to_close = match self.channel_state {
2124 ChannelState::AwaitingChannelReady(flags) =>
2125 flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
2126 ChannelState::ChannelReady(flags) =>
2127 flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
2130 self.pending_inbound_htlcs.is_empty() &&
2131 self.pending_outbound_htlcs.is_empty() &&
2132 self.pending_update_fee.is_none() &&
2136 /// Returns true if this channel is currently available for use. This is a superset of
2137 /// is_usable() and considers things like the channel being temporarily disabled.
2138 /// Allowed in any state (including after shutdown)
2139 pub fn is_live(&self) -> bool {
2140 self.is_usable() && !self.channel_state.is_peer_disconnected()
2143 // Public utilities:
2145 pub fn channel_id(&self) -> ChannelId {
2149 // Return the `temporary_channel_id` used during channel establishment.
2151 // Will return `None` for channels created prior to LDK version 0.0.115.
2152 pub fn temporary_channel_id(&self) -> Option<ChannelId> {
2153 self.temporary_channel_id
2156 pub fn minimum_depth(&self) -> Option<u32> {
2160 /// Gets the "user_id" value passed into the construction of this channel. It has no special
2161 /// meaning and exists only to allow users to have a persistent identifier of a channel.
2162 pub fn get_user_id(&self) -> u128 {
2166 /// Gets the channel's type
2167 pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
2171 /// Gets the channel's `short_channel_id`.
2173 /// Will return `None` if the channel hasn't been confirmed yet.
2174 pub fn get_short_channel_id(&self) -> Option<u64> {
2175 self.short_channel_id
2178 /// Allowed in any state (including after shutdown)
2179 pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
2180 self.latest_inbound_scid_alias
2183 /// Allowed in any state (including after shutdown)
2184 pub fn outbound_scid_alias(&self) -> u64 {
2185 self.outbound_scid_alias
2188 /// Returns the holder signer for this channel.
2190 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
		&self.holder_signer
2194 /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
	/// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases,
2196 /// or prior to any channel actions during `Channel` initialization.
2197 pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
2198 debug_assert_eq!(self.outbound_scid_alias, 0);
2199 self.outbound_scid_alias = outbound_scid_alias;
2202 /// Returns the funding_txo we either got from our peer, or were given by
2203 /// get_funding_created.
2204 pub fn get_funding_txo(&self) -> Option<OutPoint> {
2205 self.channel_transaction_parameters.funding_outpoint
2208 /// Returns the height in which our funding transaction was confirmed.
2209 pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
2210 let conf_height = self.funding_tx_confirmation_height;
2211 if conf_height > 0 {
2218 /// Returns the block hash in which our funding transaction was confirmed.
2219 pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
2220 self.funding_tx_confirmed_in
2223 /// Returns the current number of confirmations on the funding transaction.
2224 pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
2225 if self.funding_tx_confirmation_height == 0 {
2226 // We either haven't seen any confirmation yet, or observed a reorg.
2230 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
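	// For example, a funding transaction confirmed at height 100 with the chain tip at height
	// 105 has 105 - 100 + 1 = 6 confirmations; if `height` is below the confirmation height
	// (e.g. during a reorg), `checked_sub` fails and we report 0 rather than underflowing.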
2233 fn get_holder_selected_contest_delay(&self) -> u16 {
2234 self.channel_transaction_parameters.holder_selected_contest_delay
2237 fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
2238 &self.channel_transaction_parameters.holder_pubkeys
2241 pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
2242 self.channel_transaction_parameters.counterparty_parameters
2243 .as_ref().map(|params| params.selected_contest_delay)
2246 fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
2247 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
2250 /// Allowed in any state (including after shutdown)
2251 pub fn get_counterparty_node_id(&self) -> PublicKey {
2252 self.counterparty_node_id
2255 /// Allowed in any state (including after shutdown)
2256 pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
2257 self.holder_htlc_minimum_msat
	/// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
2261 pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
2262 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
2265 /// Allowed in any state (including after shutdown)
2266 pub fn get_announced_htlc_max_msat(&self) -> u64 {
2268 // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
2269 // to use full capacity. This is an effort to reduce routing failures, because in many cases
		// a channel might have been used to route very small values (either by honest users or as DoS).
2271 self.channel_value_satoshis * 1000 * 9 / 10,
2273 self.counterparty_max_htlc_value_in_flight_msat
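		// For example, a 1_000_000 sat channel is advertised with at most
		// 1_000_000 * 1000 * 9 / 10 = 900_000_000 msat, further capped by whatever
		// max-in-flight value our counterparty selected during the handshake.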
2277 /// Allowed in any state (including after shutdown)
2278 pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
2279 self.counterparty_htlc_minimum_msat
	/// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
2283 pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
2284 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
2287 fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
2288 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
2289 let holder_reserve = self.holder_selected_channel_reserve_satoshis;
2291 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
2292 party_max_htlc_value_in_flight_msat
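		// A worked example of the closure above, with illustrative numbers: for a 1_000_000 sat
		// channel with a 10_000 sat reserve on each side and a party max-in-flight of
		// 500_000_000 msat, the spendable bound is (1_000_000 - 10_000 - 10_000) * 1000 =
		// 980_000_000 msat, so the result is min(980_000_000, 500_000_000) = 500_000_000 msat.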
2297 pub fn get_value_satoshis(&self) -> u64 {
2298 self.channel_value_satoshis
2301 pub fn get_fee_proportional_millionths(&self) -> u32 {
2302 self.config.options.forwarding_fee_proportional_millionths
2305 pub fn get_cltv_expiry_delta(&self) -> u16 {
2306 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
2309 pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
2310 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
2311 where F::Target: FeeEstimator
2313 match self.config.options.max_dust_htlc_exposure {
2314 MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
2315 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
2316 ConfirmationTarget::OnChainSweep) as u64;
2317 feerate_per_kw.saturating_mul(multiplier)
2319 MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
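			// For example, `FeeRateMultiplier(10_000)` with an estimated `OnChainSweep` feerate of
			// 5_000 sat/KW yields a 50_000_000 msat exposure cap; `saturating_mul` ensures an
			// absurdly high feerate clamps to u64::MAX instead of overflowing.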
2323 /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
2324 pub fn prev_config(&self) -> Option<ChannelConfig> {
2325 self.prev_config.map(|prev_config| prev_config.0)
2328 // Checks whether we should emit a `ChannelPending` event.
2329 pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
2330 self.is_funding_broadcast() && !self.channel_pending_event_emitted
2333 // Returns whether we already emitted a `ChannelPending` event.
2334 pub(crate) fn channel_pending_event_emitted(&self) -> bool {
2335 self.channel_pending_event_emitted
2338 // Remembers that we already emitted a `ChannelPending` event.
2339 pub(crate) fn set_channel_pending_event_emitted(&mut self) {
2340 self.channel_pending_event_emitted = true;
2343 // Checks whether we should emit a `ChannelReady` event.
2344 pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
2345 self.is_usable() && !self.channel_ready_event_emitted
2348 // Remembers that we already emitted a `ChannelReady` event.
2349 pub(crate) fn set_channel_ready_event_emitted(&mut self) {
2350 self.channel_ready_event_emitted = true;
2353 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
2354 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
2355 /// no longer be considered when forwarding HTLCs.
2356 pub fn maybe_expire_prev_config(&mut self) {
2357 if self.prev_config.is_none() {
2360 let prev_config = self.prev_config.as_mut().unwrap();
2362 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
2363 self.prev_config = None;
2367 /// Returns the current [`ChannelConfig`] applied to the channel.
2368 pub fn config(&self) -> ChannelConfig {
2372 /// Updates the channel's config. A bool is returned indicating whether the config update
2373 /// applied resulted in a new ChannelUpdate message.
2374 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
2375 let did_channel_update =
2376 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
2377 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
2378 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
2379 if did_channel_update {
2380 self.prev_config = Some((self.config.options, 0));
2381 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
2382 // policy change to propagate throughout the network.
2383 self.update_time_counter += 1;
2385 self.config.options = *config;
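		// A minimal usage sketch with hypothetical values: raising the proportional fee changes
		// the forwarding policy, so `update_config` stores the old options in `prev_config` and
		// reports that a fresh `channel_update` should be broadcast:
		//
		//     let mut new_config = context.config();
		//     new_config.forwarding_fee_proportional_millionths += 100;
		//     assert!(context.update_config(&new_config));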
2389 /// Returns true if funding_signed was sent/received and the
2390 /// funding transaction has been broadcast if necessary.
2391 pub fn is_funding_broadcast(&self) -> bool {
2392 !self.channel_state.is_pre_funded_state() &&
2393 !matches!(self.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH))
2396 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
2397 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
2398 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
2399 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
	/// such, a transaction is generally the result of b increasing the amount paid to a (or adding
	/// an HTLC to it).
2402 /// @local is used only to convert relevant internal structures which refer to remote vs local
2403 /// to decide value of outputs and direction of HTLCs.
2404 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
2405 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
2406 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
2407 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
2408 /// which peer generated this transaction and "to whom" this transaction flows.
2410 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
2411 where L::Target: Logger
2413 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
2414 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
2415 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
2417 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
2418 let mut remote_htlc_total_msat = 0;
2419 let mut local_htlc_total_msat = 0;
2420 let mut value_to_self_msat_offset = 0;
2422 let mut feerate_per_kw = self.feerate_per_kw;
2423 if let Some((feerate, update_state)) = self.pending_update_fee {
2424 if match update_state {
2425 // Note that these match the inclusion criteria when scanning
2426 // pending_inbound_htlcs below.
2427 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
2428 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
2429 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
2431 feerate_per_kw = feerate;
2435 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
2436 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
2437 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
2439 if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
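		// Per BOLT 3, the number actually embedded in the broadcast transaction is
		// (INITIAL_COMMITMENT_NUMBER - commitment_number) XORed with the obscure factor derived
		// from both parties' payment basepoints, split across the locktime and input sequence
		// fields; that XOR is what the "really {} xor {}" in the trace above refers to.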
2441 macro_rules! get_htlc_in_commitment {
2442 ($htlc: expr, $offered: expr) => {
2443 HTLCOutputInCommitment {
2445 amount_msat: $htlc.amount_msat,
2446 cltv_expiry: $htlc.cltv_expiry,
2447 payment_hash: $htlc.payment_hash,
2448 transaction_output_index: None
2453 macro_rules! add_htlc_output {
2454 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
2455 if $outbound == local { // "offered HTLC output"
2456 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
2457 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2460 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
2462 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
2463 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
2464 included_non_dust_htlcs.push((htlc_in_tx, $source));
2466 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
2467 included_dust_htlcs.push((htlc_in_tx, $source));
2470 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
2471 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2474 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
2476 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
2477 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
2478 included_non_dust_htlcs.push((htlc_in_tx, $source));
2480 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
2481 included_dust_htlcs.push((htlc_in_tx, $source));
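		// Worked example of the dust tests above (non-anchor channel, illustrative numbers): at
		// 2_500 sat/KW an offered HTLC's timeout transaction costs roughly
		// 2_500 * 663 / 1000 = 1_657 sats, so with a 546 sat broadcaster dust limit any HTLC
		// below 546 + 1_657 = 2_203 sats is treated as dust and gets no commitment output.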
2487 let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
2489 for ref htlc in self.pending_inbound_htlcs.iter() {
2490 let (include, state_name) = match htlc.state {
2491 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
2492 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
2493 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
2494 InboundHTLCState::Committed => (true, "Committed"),
2495 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
2499 add_htlc_output!(htlc, false, None, state_name);
2500 remote_htlc_total_msat += htlc.amount_msat;
2502 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
2504 &InboundHTLCState::LocalRemoved(ref reason) => {
2505 if generated_by_local {
2506 if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
2507 inbound_htlc_preimages.push(preimage);
2508 value_to_self_msat_offset += htlc.amount_msat as i64;
2518 let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
2520 for ref htlc in self.pending_outbound_htlcs.iter() {
2521 let (include, state_name) = match htlc.state {
2522 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
2523 OutboundHTLCState::Committed => (true, "Committed"),
2524 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
2525 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
2526 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
2529 let preimage_opt = match htlc.state {
2530 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
2531 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
2532 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
2536 if let Some(preimage) = preimage_opt {
2537 outbound_htlc_preimages.push(preimage);
2541 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
2542 local_htlc_total_msat += htlc.amount_msat;
2544 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
2546 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
2547 value_to_self_msat_offset -= htlc.amount_msat as i64;
2549 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
2550 if !generated_by_local {
2551 value_to_self_msat_offset -= htlc.amount_msat as i64;
2559 let value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
2560 assert!(value_to_self_msat >= 0);
2561 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
2562 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
2563 // "violate" their reserve value by couting those against it. Thus, we have to convert
2564 // everything to i64 before subtracting as otherwise we can overflow.
2565 let value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
2566 assert!(value_to_remote_msat >= 0);
2568 #[cfg(debug_assertions)]
2570 // Make sure that the to_self/to_remote is always either past the appropriate
2571 // channel_reserve *or* it is making progress towards it.
2572 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
2573 self.holder_max_commitment_tx_output.lock().unwrap()
2575 self.counterparty_max_commitment_tx_output.lock().unwrap()
2577 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
2578 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
2579 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
2580 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
2583 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
2584 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
2585 let (value_to_self, value_to_remote) = if self.is_outbound() {
2586 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
2588 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
2591 let mut value_to_a = if local { value_to_self } else { value_to_remote };
2592 let mut value_to_b = if local { value_to_remote } else { value_to_self };
2593 let (funding_pubkey_a, funding_pubkey_b) = if local {
2594 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
2596 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
2599 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
2600 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
2605 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
2606 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
2611 let num_nondust_htlcs = included_non_dust_htlcs.len();
2613 let channel_parameters =
2614 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
2615 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
2616 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
2623 &mut included_non_dust_htlcs,
2626 let mut htlcs_included = included_non_dust_htlcs;
2627 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
2628 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
2629 htlcs_included.append(&mut included_dust_htlcs);
2637 local_balance_msat: value_to_self_msat as u64,
2638 remote_balance_msat: value_to_remote_msat as u64,
2639 inbound_htlc_preimages,
2640 outbound_htlc_preimages,
2645 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
2646 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
2647 /// our counterparty!)
2648 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
2649 /// TODO: Find a way to enforce this distinction at compile time.
2650 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
2651 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
2652 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
2653 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
2654 let counterparty_pubkeys = self.get_counterparty_pubkeys();
2656 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
2660 /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
2661 /// will sign and send to our counterparty.
2663 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
2664 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
2665 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
2666 let counterparty_pubkeys = self.get_counterparty_pubkeys();
2668 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
2671 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
2672 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
2673 /// Panics if called before accept_channel/InboundV1Channel::new
2674 pub fn get_funding_redeemscript(&self) -> ScriptBuf {
2675 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
2678 fn counterparty_funding_pubkey(&self) -> &PublicKey {
2679 &self.get_counterparty_pubkeys().funding_pubkey
2682 pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
2686 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
2687 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
2688 // may, at any point, increase by at least 10 sat/vB (i.e. 2530 sat/kWU) or 25%,
2689 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
2690 // more dust balance if the feerate increases when we have several HTLCs pending
2691 // which are near the dust limit.
2692 let mut feerate_per_kw = self.feerate_per_kw;
2693 // If there's a pending update fee, use it to ensure we aren't under-estimating
2694 // potential feerate updates coming soon.
2695 if let Some((feerate, _)) = self.pending_update_fee {
2696 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
2698 if let Some(feerate) = outbound_feerate_update {
2699 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
2701 let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
2702 cmp::max(feerate_per_kw + 2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
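// Worked example (illustrative numbers): with feerate_per_kw = 2_000 and no
// pending updates, the buffer feerate is max(2_000 + 2_530, 2_000 * 1_250 / 1_000)
// = max(4_530, 2_500) = 4_530 sat/kWU, so the +10 sat/vB floor dominates at low
// feerates; at 20_000 sat/kWU the 25% bump (25_000 vs 22_530) dominates instead.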
2705 /// Get forwarding information for the counterparty.
2706 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
2707 self.counterparty_forwarding_info.clone()
2710 /// Returns an HTLCStats about pending inbound HTLCs.
2711 fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
2713 let mut stats = HTLCStats {
2714 pending_htlcs: context.pending_inbound_htlcs.len() as u32,
2715 pending_htlcs_value_msat: 0,
2716 on_counterparty_tx_dust_exposure_msat: 0,
2717 on_holder_tx_dust_exposure_msat: 0,
2718 holding_cell_msat: 0,
2719 on_holder_tx_holding_cell_htlcs_count: 0,
2722 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2725 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
2726 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
2727 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
2729 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
2730 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
2731 for ref htlc in context.pending_inbound_htlcs.iter() {
2732 stats.pending_htlcs_value_msat += htlc.amount_msat;
2733 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
2734 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
2736 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
2737 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
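// For intuition (illustrative numbers): a 1_000-sat inbound HTLC with
// holder_dust_limit_success_sat = 3_000 satisfies 1_000 < 3_000 above, so its
// full 1_000_000 msat counts toward on_holder_tx_dust_exposure_msat, while a
// 5_000-sat HTLC would add nothing to either dust-exposure total.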
2743 /// Returns an HTLCStats about pending outbound HTLCs, *including* pending adds in our holding cell.
2744 fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
2746 let mut stats = HTLCStats {
2747 pending_htlcs: context.pending_outbound_htlcs.len() as u32,
2748 pending_htlcs_value_msat: 0,
2749 on_counterparty_tx_dust_exposure_msat: 0,
2750 on_holder_tx_dust_exposure_msat: 0,
2751 holding_cell_msat: 0,
2752 on_holder_tx_holding_cell_htlcs_count: 0,
2755 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2758 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
2759 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
2760 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
2762 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
2763 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
2764 for ref htlc in context.pending_outbound_htlcs.iter() {
2765 stats.pending_htlcs_value_msat += htlc.amount_msat;
2766 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
2767 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
2769 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
2770 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
2774 for update in context.holding_cell_htlc_updates.iter() {
2775 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
2776 stats.pending_htlcs += 1;
2777 stats.pending_htlcs_value_msat += amount_msat;
2778 stats.holding_cell_msat += amount_msat;
2779 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
2780 stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
2782 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
2783 stats.on_holder_tx_dust_exposure_msat += amount_msat;
2785 stats.on_holder_tx_holding_cell_htlcs_count += 1;
2792 /// Returns information on all pending inbound HTLCs.
2793 pub fn get_pending_inbound_htlc_details(&self) -> Vec<InboundHTLCDetails> {
2794 let mut holding_cell_states = new_hash_map();
2795 for holding_cell_update in self.holding_cell_htlc_updates.iter() {
2796 match holding_cell_update {
2797 HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2798 holding_cell_states.insert(
2800 InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill,
2803 HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2804 holding_cell_states.insert(
2806 InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail,
2809 HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } => {
2810 holding_cell_states.insert(
2812 InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail,
2816 HTLCUpdateAwaitingACK::AddHTLC { .. } => {},
2819 let mut inbound_details = Vec::new();
2820 let htlc_success_dust_limit = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2823 let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
2824 dust_buffer_feerate * htlc_success_tx_weight(self.get_channel_type()) / 1000
2826 let holder_dust_limit_success_sat = htlc_success_dust_limit + self.holder_dust_limit_satoshis;
2827 for htlc in self.pending_inbound_htlcs.iter() {
2828 if let Some(state_details) = (&htlc.state).into() {
2829 inbound_details.push(InboundHTLCDetails{
2830 htlc_id: htlc.htlc_id,
2831 amount_msat: htlc.amount_msat,
2832 cltv_expiry: htlc.cltv_expiry,
2833 payment_hash: htlc.payment_hash,
2834 state: Some(holding_cell_states.remove(&htlc.htlc_id).unwrap_or(state_details)),
2835 is_dust: htlc.amount_msat / 1000 < holder_dust_limit_success_sat,
2842 /// Returns information on all pending outbound HTLCs.
2843 pub fn get_pending_outbound_htlc_details(&self) -> Vec<OutboundHTLCDetails> {
2844 let mut outbound_details = Vec::new();
2845 let htlc_timeout_dust_limit = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2848 let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
2849 dust_buffer_feerate * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
2851 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.holder_dust_limit_satoshis;
2852 for htlc in self.pending_outbound_htlcs.iter() {
2853 outbound_details.push(OutboundHTLCDetails{
2854 htlc_id: Some(htlc.htlc_id),
2855 amount_msat: htlc.amount_msat,
2856 cltv_expiry: htlc.cltv_expiry,
2857 payment_hash: htlc.payment_hash,
2858 skimmed_fee_msat: htlc.skimmed_fee_msat,
2859 state: Some((&htlc.state).into()),
2860 is_dust: htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat,
2863 for holding_cell_update in self.holding_cell_htlc_updates.iter() {
2864 if let HTLCUpdateAwaitingACK::AddHTLC {
2870 } = *holding_cell_update {
2871 outbound_details.push(OutboundHTLCDetails{
2873 amount_msat: amount_msat,
2874 cltv_expiry: cltv_expiry,
2875 payment_hash: payment_hash,
2876 skimmed_fee_msat: skimmed_fee_msat,
2877 state: Some(OutboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
2878 is_dust: amount_msat / 1000 < holder_dust_limit_timeout_sat,
2885 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
2886 /// Doesn't bother handling the
2887 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
2888 /// corner case properly.
2889 pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
2890 -> AvailableBalances
2891 where F::Target: FeeEstimator
2893 let context = &self;
2894 // Note that we have to handle overflow due to the above case.
2895 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
2896 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
2898 let mut balance_msat = context.value_to_self_msat;
2899 for ref htlc in context.pending_inbound_htlcs.iter() {
2900 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
2901 balance_msat += htlc.amount_msat;
2904 balance_msat -= outbound_stats.pending_htlcs_value_msat;
2906 let outbound_capacity_msat = context.value_to_self_msat
2907 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
2909 context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
2911 let mut available_capacity_msat = outbound_capacity_msat;
2913 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2914 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
2918 if context.is_outbound() {
2919 // We should mind channel commit tx fee when computing how much of the available capacity
2920 // can be used in the next htlc. Mirrors the logic in send_htlc.
2922 // The fee depends on whether the amount we will be sending is above dust or not,
2923 // and the answer will in turn change the amount itself, making it a circular dependency.
2925 // This complicates the computation around dust-values, up to the one-htlc-value.
2926 let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
2927 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2928 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
2931 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
2932 let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
2933 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
2934 let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
2935 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2936 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2937 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2940 // We will first subtract the fee as if we were above-dust. Then, if the resulting
2941 // value ends up being below dust, we have this fee available again. In that case,
2942 // match the value to right-below-dust.
2943 let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
2944 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
2945 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
2946 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
2947 debug_assert!(one_htlc_difference_msat != 0);
2948 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
2949 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
2950 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
2952 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
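// Worked example of the two branches above (illustrative, non-anchor channel so
// anchor_outputs_value_msat = 0): with available_capacity_msat = 10_000_000,
// max_reserved_commit_tx_fee_msat = 9_000_000, min_reserved_commit_tx_fee_msat =
// 8_000_000 and real_dust_limit_timeout_sat = 2_000, subtracting the above-dust
// fee leaves 1_000_000 msat, below the 2_000_000 msat dust floor. We add back the
// 1_000_000 msat one-HTLC fee difference and cap at 1_999_999 msat (right below
// dust): the next HTLC can only be sent as a dust HTLC.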
2955 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
2956 // sending a new HTLC won't reduce their balance below our reserve threshold.
2957 let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
2958 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2959 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
2962 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
2963 let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
2965 let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
2966 let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
2967 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
2969 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
2970 // If another HTLC's fee would reduce the remote's balance below the reserve limit
2971 // we've selected for them, we can only send dust HTLCs.
2972 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
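// Illustrative reserve check for the guard above: in a 1_000_000-sat channel
// with value_to_self_msat = 990_000_000 and no pending inbound HTLCs, the
// remote balance is 10_000_000 msat. If holder_selected_chan_reserve_msat is
// also 10_000_000, any additional non-dust HTLC fee they would pay pushes them
// below reserve, so the next HTLC is capped right below
// real_dust_limit_success_sat (dust HTLCs add no fee to their commitment tx).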
2976 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
2978 // If we get close to our maximum dust exposure, we end up in a situation where we can send
2980 // between zero and the remaining dust exposure limit OR above the dust limit.
2980 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
2981 // send above the dust limit (as the router can always overpay to meet the dust limit).
2982 let mut remaining_msat_below_dust_exposure_limit = None;
2983 let mut dust_exposure_dust_limit_msat = 0;
2984 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
2986 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2987 (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
2989 let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
2990 (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2991 context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2993 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
2994 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2995 remaining_msat_below_dust_exposure_limit =
2996 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
2997 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
3000 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
3001 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
3002 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
3003 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
3004 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
3005 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
3008 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
3009 if available_capacity_msat < dust_exposure_dust_limit_msat {
3010 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
3012 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
3016 available_capacity_msat = cmp::min(available_capacity_msat,
3017 context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
3019 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
3020 available_capacity_msat = 0;
3024 inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
3025 - context.value_to_self_msat as i64
3026 - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
3027 - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
3029 outbound_capacity_msat,
3030 next_outbound_htlc_limit_msat: available_capacity_msat,
3031 next_outbound_htlc_minimum_msat,
3036 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
3037 let context = &self;
3038 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
3041 /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
3042 /// number of pending HTLCs that are on track to be in our next commitment tx.
3044 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
3045 /// `fee_spike_buffer_htlc` is `Some`.
3047 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
3048 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
3050 /// Dust HTLCs are excluded.
3051 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
3052 let context = &self;
3053 assert!(context.is_outbound());
3055 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3058 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
3059 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
3061 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
3062 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
3064 let mut addl_htlcs = 0;
3065 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
3067 HTLCInitiator::LocalOffered => {
3068 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
3072 HTLCInitiator::RemoteOffered => {
3073 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
3079 let mut included_htlcs = 0;
3080 for ref htlc in context.pending_inbound_htlcs.iter() {
3081 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
3084 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
3085 // transaction including this HTLC if it times out before they RAA.
3086 included_htlcs += 1;
3089 for ref htlc in context.pending_outbound_htlcs.iter() {
3090 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
3094 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
3095 OutboundHTLCState::Committed => included_htlcs += 1,
3096 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
3097 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
3098 // transaction won't be generated until they send us their next RAA, which will mean
3099 // dropping any HTLCs in this state.
3104 for htlc in context.holding_cell_htlc_updates.iter() {
3106 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
3107 if amount_msat / 1000 < real_dust_limit_timeout_sat {
3112 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
3113 // ack we're guaranteed to never include them in commitment txs anymore.
3117 let num_htlcs = included_htlcs + addl_htlcs;
3118 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
3119 #[cfg(any(test, fuzzing))]
3122 if fee_spike_buffer_htlc.is_some() {
3123 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
3125 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
3126 + context.holding_cell_htlc_updates.len();
3127 let commitment_tx_info = CommitmentTxInfoCached {
3129 total_pending_htlcs,
3130 next_holder_htlc_id: match htlc.origin {
3131 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
3132 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
3134 next_counterparty_htlc_id: match htlc.origin {
3135 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
3136 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
3138 feerate: context.feerate_per_kw,
3140 *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
3145 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
3146 /// pending HTLCs that are on track to be in their next commitment tx.
3148 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
3149 /// `fee_spike_buffer_htlc` is `Some`.
3151 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
3152 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
3154 /// Dust HTLCs are excluded.
3155 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
3156 let context = &self;
3157 assert!(!context.is_outbound());
3159 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3162 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
3163 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
3165 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
3166 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
3168 let mut addl_htlcs = 0;
3169 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
3171 HTLCInitiator::LocalOffered => {
3172 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
3176 HTLCInitiator::RemoteOffered => {
3177 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
3183 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
3184 // non-dust inbound HTLCs are included (as all states imply they will be included) and only
3185 // committed outbound HTLCs, see below.
3186 let mut included_htlcs = 0;
3187 for ref htlc in context.pending_inbound_htlcs.iter() {
3188 if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
3191 included_htlcs += 1;
3194 for ref htlc in context.pending_outbound_htlcs.iter() {
3195 if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
3198 // We only include outbound HTLCs if they will not be included in their next commitment_signed,
3199 // i.e. if they've responded to us with an RAA after announcement.
3201 OutboundHTLCState::Committed => included_htlcs += 1,
3202 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
3203 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
3208 let num_htlcs = included_htlcs + addl_htlcs;
3209 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
3210 #[cfg(any(test, fuzzing))]
3213 if fee_spike_buffer_htlc.is_some() {
3214 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
3216 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
3217 let commitment_tx_info = CommitmentTxInfoCached {
3219 total_pending_htlcs,
3220 next_holder_htlc_id: match htlc.origin {
3221 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
3222 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
3224 next_counterparty_htlc_id: match htlc.origin {
3225 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
3226 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
3228 feerate: context.feerate_per_kw,
3230 *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
3235 fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O> where F: Fn() -> Option<O> {
3236 match self.channel_state {
3237 ChannelState::FundingNegotiated => f(),
3238 ChannelState::AwaitingChannelReady(flags) =>
3239 if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) ||
3240 flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into())
3250 /// Returns the transaction if there is a pending funding transaction that is yet to be broadcast.
3252 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
3253 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
3256 /// Returns the transaction ID if there is a pending funding transaction that is yet to be broadcast.
3258 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
3259 self.if_unbroadcasted_funding(||
3260 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
3264 /// Returns whether the channel is funded in a batch.
3265 pub fn is_batch_funding(&self) -> bool {
3266 self.is_batch_funding.is_some()
3269 /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be broadcast.
3271 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
3272 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
3275 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
3276 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
3277 /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
3278 /// Also returns the list of payment_hashes for HTLCs which we can safely fail backwards
3279 /// immediately (others we will have to allow to time out).
3280 pub fn force_shutdown(&mut self, should_broadcast: bool, closure_reason: ClosureReason) -> ShutdownResult {
3281 // Note that we MUST only generate a monitor update that indicates force-closure - we're
3282 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
3283 // being fully configured in some cases. Thus, it's likely any monitor events we generate will
3284 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
3285 assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete));
3287 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
3288 // return them to fail the payment.
3289 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
3290 let counterparty_node_id = self.get_counterparty_node_id();
3291 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
3293 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
3294 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
3299 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
3300 // If we haven't yet exchanged funding signatures (ie channel_state < AwaitingChannelReady),
3301 // returning a channel monitor update here would imply a channel monitor update before
3302 // we even registered the channel monitor to begin with, which is invalid.
3303 // Thus, if we aren't actually at a point where we could conceivably broadcast the
3304 // funding transaction, don't return a funding txo (which prevents providing the
3305 // monitor update to the user, even if we return one).
3306 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
3307 if !self.channel_state.is_pre_funded_state() {
3308 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
3309 Some((self.get_counterparty_node_id(), funding_txo, self.channel_id(), ChannelMonitorUpdate {
3310 update_id: self.latest_monitor_update_id,
3311 counterparty_node_id: Some(self.counterparty_node_id),
3312 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
3313 channel_id: Some(self.channel_id()),
3317 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
3318 let unbroadcasted_funding_tx = self.unbroadcasted_funding();
3320 self.channel_state = ChannelState::ShutdownComplete;
3321 self.update_time_counter += 1;
3325 dropped_outbound_htlcs,
3326 unbroadcasted_batch_funding_txid,
3327 channel_id: self.channel_id,
3328 user_channel_id: self.user_id,
3329 channel_capacity_satoshis: self.channel_value_satoshis,
3330 counterparty_node_id: self.counterparty_node_id,
3331 unbroadcasted_funding_tx,
3332 channel_funding_txo: self.get_funding_txo(),
3336 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
3337 fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
3338 let counterparty_keys = self.build_remote_transaction_keys();
3339 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
3341 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
3342 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
3343 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
3344 &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
3346 match &self.holder_signer {
3347 // TODO (arik): move match into calling method for Taproot
3348 ChannelSignerType::Ecdsa(ecdsa) => {
3349 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
3350 .map(|(signature, _)| msgs::FundingSigned {
3351 channel_id: self.channel_id(),
3354 partial_signature_with_nonce: None,
3358 if funding_signed.is_none() {
3359 #[cfg(not(async_signing))] {
3360 panic!("Failed to get signature for funding_signed");
3362 #[cfg(async_signing)] {
3363 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
3364 self.signer_pending_funding = true;
3366 } else if self.signer_pending_funding {
3367 log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
3368 self.signer_pending_funding = false;
3371 // We sign the "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
3372 (counterparty_initial_commitment_tx, funding_signed)
3374 // TODO (taproot|arik)
3380 /// If we receive an error message when attempting to open a channel, it may only be a rejection
3381 /// of the channel type we tried, not of our ability to open any channel at all. We can see if a
3382 /// downgrade of channel features would be possible so that we can still open the channel.
3383 pub(crate) fn maybe_downgrade_channel_features<F: Deref>(
3384 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>
3387 F::Target: FeeEstimator
3389 if !self.is_outbound() ||
3391 self.channel_state, ChannelState::NegotiatingFunding(flags)
3392 if flags == NegotiatingFundingFlags::OUR_INIT_SENT
3397 if self.channel_type == ChannelTypeFeatures::only_static_remote_key() {
3398 // We've exhausted our options
3401 // We support opening a few different types of channels. Try removing our additional
3402 // features one by one until we've either arrived at our default or the counterparty has
3405 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
3406 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
3407 // checks whether the counterparty supports every feature, this would only happen if the
3408 // counterparty is advertising the feature, but rejecting channels proposing the feature for some reason.
3410 if self.channel_type.supports_anchors_zero_fee_htlc_tx() {
3411 self.channel_type.clear_anchors_zero_fee_htlc_tx();
3412 self.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
3413 assert!(!self.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
3414 } else if self.channel_type.supports_scid_privacy() {
3415 self.channel_type.clear_scid_privacy();
3417 self.channel_type = ChannelTypeFeatures::only_static_remote_key();
3419 self.channel_transaction_parameters.channel_type_features = self.channel_type.clone();
3424 // Internal utility functions for channels
3426 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
3427 /// `channel_value_satoshis` in msat, set through
3428 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
3430 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
3432 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
3433 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
3434 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
3436 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
3439 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
3441 channel_value_satoshis * 10 * configured_percent
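// Worked example: for a 1_000_000-sat channel with the percentage configured
// at 10, this returns 1_000_000 * 10 * 10 = 100_000_000 msat, i.e. 10% of the
// channel value (the `* 10` folds together the sat -> msat `* 1000` and the
// `/ 100` of the percentage).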
3444 /// Returns a minimum channel reserve value the remote needs to maintain,
3445 /// required by us according to the configured or default
3446 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
3448 /// Guaranteed to return a value no larger than channel_value_satoshis
3450 /// This is used both for outbound and inbound channels and has lower bound
3451 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
3452 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
3453 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
3454 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
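// Worked example: a 1_000_000-sat channel with
// their_channel_reserve_proportional_millionths = 10_000 (i.e. 1%) yields
// 1_000_000 * 10_000 / 1_000_000 = 10_000 sat, clamped to no less than
// MIN_THEIR_CHAN_RESERVE_SATOSHIS and no more than the full channel value.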
3457 /// This is for legacy reasons, present for forward-compatibility.
3458 /// LDK versions older than 0.0.104 don't know how to read/handle values other than the default
3459 /// from storage. Hence, we use this function to not persist default values of
3460 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
3461 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
3462 let (q, _) = channel_value_satoshis.overflowing_div(100);
3463 cmp::min(channel_value_satoshis, cmp::max(q, 1000))
3466 /// Returns a minimum channel reserve value each party needs to maintain, fixed in the spec to a
3467 /// default of 1% of the total channel value.
3469 /// Guaranteed to return a value no larger than channel_value_satoshis
3471 /// This is used both for outbound and inbound channels and has lower bound
3472 /// of `dust_limit_satoshis`.
3473 #[cfg(dual_funding)]
3474 fn get_v2_channel_reserve_satoshis(channel_value_satoshis: u64, dust_limit_satoshis: u64) -> u64 {
3475 // Fixed at 1% of channel value by spec.
3476 let (q, _) = channel_value_satoshis.overflowing_div(100);
3477 cmp::min(channel_value_satoshis, cmp::max(q, dust_limit_satoshis))
3480 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
3481 // Note that num_htlcs should not include dust HTLCs.
3483 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
3484 feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
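// Worked example (assumes the non-anchor commitment base weight of 724 WU and
// 172 WU per HTLC): at feerate_per_kw = 2_500 with two non-dust HTLCs the fee
// is 2_500 * (724 + 2 * 172) / 1_000 = 2_500 * 1_068 / 1_000 = 2_670 sat.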
3487 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
3488 // Note that num_htlcs should not include dust HTLCs.
3489 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
3490 // Note that we need to divide before multiplying to round properly,
3491 // since the lowest denomination of bitcoin on-chain is the satoshi.
3492 (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
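// The divide-then-multiply matters: with a total weight of 1_068 WU and
// feerate_per_kw = 253, we get 1_068 * 253 / 1_000 * 1_000 = 270_000 msat
// rather than 270_204 msat, i.e. always a whole number of satoshis (270 sat),
// matching what commit_tx_fee_sat would return.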
3495 /// Context for dual-funded channels.
3496 #[cfg(dual_funding)]
3497 pub(super) struct DualFundingChannelContext {
3498 /// The amount in satoshis we will be contributing to the channel.
3499 pub our_funding_satoshis: u64,
3500 /// The amount in satoshis our counterparty will be contributing to the channel.
3501 pub their_funding_satoshis: u64,
3502 /// The funding transaction locktime suggested by the initiator. If set by us, it is always set
3503 /// to the current block height to align incentives against fee-sniping.
3504 pub funding_tx_locktime: u32,
3505 /// The feerate set by the initiator to be used for the funding transaction.
3506 pub funding_feerate_sat_per_1000_weight: u32,
3509 // Holder designates channel data owned for the benefit of the user client.
3510 // Counterparty designates channel data owned by the other channel participant entity.
3511 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
3512 pub context: ChannelContext<SP>,
3513 #[cfg(dual_funding)]
3514 pub dual_funding_channel_context: Option<DualFundingChannelContext>,
3517 #[cfg(any(test, fuzzing))]
3518 struct CommitmentTxInfoCached {
3520 total_pending_htlcs: usize,
3521 next_holder_htlc_id: u64,
3522 next_counterparty_htlc_id: u64,
3526 /// Contents of a wire message that fails an HTLC backwards. Useful for [`Channel::fail_htlc`] to
3527 /// fail with either [`msgs::UpdateFailMalformedHTLC`] or [`msgs::UpdateFailHTLC`] as needed.
3528 trait FailHTLCContents {
3529 type Message: FailHTLCMessageName;
3530 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message;
3531 fn to_inbound_htlc_state(self) -> InboundHTLCState;
3532 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK;
3534 impl FailHTLCContents for msgs::OnionErrorPacket {
3535 type Message = msgs::UpdateFailHTLC;
3536 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
3537 msgs::UpdateFailHTLC { htlc_id, channel_id, reason: self }
3539 fn to_inbound_htlc_state(self) -> InboundHTLCState {
3540 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(self))
3542 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
3543 HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet: self }
3546 impl FailHTLCContents for ([u8; 32], u16) {
3547 type Message = msgs::UpdateFailMalformedHTLC;
3548 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
3549 msgs::UpdateFailMalformedHTLC {
3552 sha256_of_onion: self.0,
3553 failure_code: self.1
3556 fn to_inbound_htlc_state(self) -> InboundHTLCState {
3557 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(self))
3559 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
3560 HTLCUpdateAwaitingACK::FailMalformedHTLC {
3562 sha256_of_onion: self.0,
3563 failure_code: self.1
3568 trait FailHTLCMessageName {
3569 fn name() -> &'static str;
3571 impl FailHTLCMessageName for msgs::UpdateFailHTLC {
3572 fn name() -> &'static str {
3576 impl FailHTLCMessageName for msgs::UpdateFailMalformedHTLC {
3577 fn name() -> &'static str {
3578 "update_fail_malformed_htlc"
3582 impl<SP: Deref> Channel<SP> where
3583 SP::Target: SignerProvider,
3584 <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
3586 fn check_remote_fee<F: Deref, L: Deref>(
3587 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
3588 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
3589 ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
3591 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
3592 ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
3594 ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
3596 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
3597 if feerate_per_kw < lower_limit {
3598 if let Some(cur_feerate) = cur_feerate_per_kw {
3599 if feerate_per_kw > cur_feerate {
3601 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
3602 cur_feerate, feerate_per_kw);
3606 return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
3612 fn get_closing_scriptpubkey(&self) -> ScriptBuf {
3613 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
3614 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
3615 // outside of those situations will fail.
3616 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
3620 fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
3625 1 + // script length (0)
3629 )*4 + // * 4 for non-witness parts
3630 2 + // witness marker and flag
3631 1 + // witness element count
3632 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
3633 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
3634 2*(1 + 71); // two signatures + sighash type flags
3635 if let Some(spk) = a_scriptpubkey {
3636 ret += ((8+1) + // output values and script length
3637 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
3639 if let Some(spk) = b_scriptpubkey {
3640 ret += ((8+1) + // output values and script length
3641 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
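// Worked upper-bound example (illustrative; both outputs P2WPKH with 22-byte
// scriptpubkeys): the non-witness base, (4 version + 1 input count + 36 prevout
// + 1 scriptSig length + 4 sequence + 1 output count + 4 locktime) * 4 = 204 WU;
// witness parts 2 + 1 + 4 + 71 (2-of-2 redeemscript) + 2 * (1 + 71) = 222 WU;
// outputs 2 * ((8 + 1) + 22) * 4 = 248 WU; total 674 WU.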
3647 fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
3648 assert!(self.context.pending_inbound_htlcs.is_empty());
3649 assert!(self.context.pending_outbound_htlcs.is_empty());
3650 assert!(self.context.pending_update_fee.is_none());
3652 let mut total_fee_satoshis = proposed_total_fee_satoshis;
3653 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
3654 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
3656 if value_to_holder < 0 {
3657 assert!(self.context.is_outbound());
3658 total_fee_satoshis += (-value_to_holder) as u64;
3659 } else if value_to_counterparty < 0 {
3660 assert!(!self.context.is_outbound());
3661 total_fee_satoshis += (-value_to_counterparty) as u64;
3664 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
3665 value_to_counterparty = 0;
3668 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
3669 value_to_holder = 0;
3672 assert!(self.context.shutdown_scriptpubkey.is_some());
3673 let holder_shutdown_script = self.get_closing_scriptpubkey();
3674 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
3675 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
3677 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
3678 (closing_transaction, total_fee_satoshis)
3681 fn funding_outpoint(&self) -> OutPoint {
3682 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
3685 /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`] entirely.
3688 /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
3689 /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
3691 /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is disconnected).
3693 pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
3694 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
3695 where L::Target: Logger {
3696 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
3697 // (see equivalent if condition there).
3698 assert!(!self.context.channel_state.can_generate_new_commitment());
3699 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
3700 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
3701 self.context.latest_monitor_update_id = mon_update_id;
3702 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
3703 assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
3707 fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
3708 // Either ChannelReady got set (which means it won't be unset) or there is no way any
3709 // caller thought we could have something claimed (because we wouldn't have accepted an
3710 // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
3712 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3713 panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
3716 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
3717 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
3718 // these, but for now we just have to treat them as normal.
3720 let mut pending_idx = core::usize::MAX;
3721 let mut htlc_value_msat = 0;
3722 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
3723 if htlc.htlc_id == htlc_id_arg {
3724 debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
3725 log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
3726 htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
3728 InboundHTLCState::Committed => {},
3729 InboundHTLCState::LocalRemoved(ref reason) => {
3730 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3732 log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
3733 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
3735 return UpdateFulfillFetch::DuplicateClaim {};
3738 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
3739 // Don't return in release mode here so that we can update channel_monitor
3743 htlc_value_msat = htlc.amount_msat;
3747 if pending_idx == core::usize::MAX {
3748 #[cfg(any(test, fuzzing))]
3749 // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
3750 // this is simply a duplicate claim, not previously failed and we lost funds.
3751 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
3752 return UpdateFulfillFetch::DuplicateClaim {};
3755 // Now update local state:
3757 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
3758 // can claim it even if the channel hits the chain before we see their next commitment.
3759 self.context.latest_monitor_update_id += 1;
3760 let monitor_update = ChannelMonitorUpdate {
3761 update_id: self.context.latest_monitor_update_id,
3762 counterparty_node_id: Some(self.context.counterparty_node_id),
3763 updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
3764 payment_preimage: payment_preimage_arg.clone(),
3766 channel_id: Some(self.context.channel_id()),
3769 if !self.context.channel_state.can_generate_new_commitment() {
3770 // Note that this condition is the same as the assertion in
3771 // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
3772 // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
3773 // do not get into this branch.
			for pending_update in self.context.holding_cell_htlc_updates.iter() {
				match pending_update {
					&HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
						if htlc_id_arg == htlc_id {
							// Make sure we don't leave latest_monitor_update_id incremented here:
							self.context.latest_monitor_update_id -= 1;
							#[cfg(any(test, fuzzing))]
							debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
							return UpdateFulfillFetch::DuplicateClaim {};
						}
					},
					&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
						&HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
					{
						if htlc_id_arg == htlc_id {
							log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
							// TODO: We may actually be able to switch to a fulfill here, though it's
							// rare enough it may not be worth the complexity burden.
							debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
							return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
						}
					},
					_ => {}
				}
			}
			log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32());
			self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
				payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
			});
			#[cfg(any(test, fuzzing))]
			self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
			return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
		}
		#[cfg(any(test, fuzzing))]
		self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);

		{
			let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
			if let InboundHTLCState::Committed = htlc.state {
			} else {
				debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
				return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
			}
			log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
			htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
		}
		UpdateFulfillFetch::NewClaim {
			monitor_update,
			htlc_value_msat,
			msg: Some(msgs::UpdateFulfillHTLC {
				channel_id: self.context.channel_id(),
				htlc_id: htlc_id_arg,
				payment_preimage: payment_preimage_arg,
			}),
		}
	}
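
	/// Claims an inbound HTLC, returning the preimage-bearing `ChannelMonitorUpdate` along
	/// with, when possible, a commitment update to send to our counterparty.
	///
	/// The example below is an illustrative sketch only (not from the original source); it
	/// assumes `chan` is a funded `Channel` and `logger` is a reference to a [`Logger`] as used
	/// throughout this module:
	/// ```ignore
	/// match chan.get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage, logger) {
	/// 	UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat } => {
	/// 		// Persist `monitor_update` before treating the claim as final.
	/// 	},
	/// 	UpdateFulfillCommitFetch::DuplicateClaim {} => {
	/// 		// Already claimed (e.g. replayed during block rescan); nothing to do.
	/// 	},
	/// }
	/// ```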
3832 pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
3833 let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
3834 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
3835 UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
3836 // Even if we aren't supposed to let new monitor updates with commitment state
3837 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
3838 // matter what. Sadly, to push a new monitor update which flies before others
3839 // already queued, we have to insert it into the pending queue and update the
3840 // update_ids of all the following monitors.
3841 if release_cs_monitor && msg.is_some() {
3842 let mut additional_update = self.build_commitment_no_status_check(logger);
3843 // build_commitment_no_status_check may bump latest_monitor_id but we want them
3844 // to be strictly increasing by one, so decrement it here.
3845 self.context.latest_monitor_update_id = monitor_update.update_id;
					monitor_update.updates.append(&mut additional_update.updates);
				} else {
					let new_mon_id = self.context.blocked_monitor_updates.get(0)
						.map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
					monitor_update.update_id = new_mon_id;
					for held_update in self.context.blocked_monitor_updates.iter_mut() {
						held_update.update.update_id += 1;
					}
					if msg.is_some() {
						debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
						let update = self.build_commitment_no_status_check(logger);
						self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
							update,
						});
					}
				}

				self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
				UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
			},
			UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
		}
	}
3870 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
3871 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
3872 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
3873 /// before we fail backwards.
3875 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
3876 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
3877 /// [`ChannelError::Ignore`].
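	///
	/// Illustrative usage sketch (not from the original source), assuming `chan` is a funded
	/// `Channel` and `err_packet` is an [`msgs::OnionErrorPacket`] built for this HTLC:
	/// ```ignore
	/// // Queue the failure; it is sent to the peer with the next commitment update.
	/// chan.queue_fail_htlc(htlc_id, err_packet, logger)?;
	/// ```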
3878 pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
3879 -> Result<(), ChannelError> where L::Target: Logger {
3880 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
			.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
	}
3884 /// Used for failing back with [`msgs::UpdateFailMalformedHTLC`]. For now, this is used when we
3885 /// want to fail blinded HTLCs where we are not the intro node.
3887 /// See [`Self::queue_fail_htlc`] for more info.
3888 pub fn queue_fail_malformed_htlc<L: Deref>(
3889 &mut self, htlc_id_arg: u64, failure_code: u16, sha256_of_onion: [u8; 32], logger: &L
3890 ) -> Result<(), ChannelError> where L::Target: Logger {
3891 self.fail_htlc(htlc_id_arg, (sha256_of_onion, failure_code), true, logger)
			.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
	}
3895 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
3896 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
3897 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
3898 /// before we fail backwards.
3900 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
3901 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
3902 /// [`ChannelError::Ignore`].
3903 fn fail_htlc<L: Deref, E: FailHTLCContents + Clone>(
		&mut self, htlc_id_arg: u64, err_contents: E, mut force_holding_cell: bool,
		logger: &L
	) -> Result<Option<E::Message>, ChannelError> where L::Target: Logger {
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			panic!("Was asked to fail an HTLC when channel was not in an operational state");
		}

		// ChannelManager may generate duplicate claims/fails due to HTLC update events from
		// on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
		// these, but for now we just have to treat them as normal.
		let mut pending_idx = core::usize::MAX;
		for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
			if htlc.htlc_id == htlc_id_arg {
				match htlc.state {
					InboundHTLCState::Committed => {},
					InboundHTLCState::LocalRemoved(ref reason) => {
						if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
						} else {
							debug_assert!(false, "Tried to fail an HTLC that was already failed");
						}
						return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
					},
					_ => {
						debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
						return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
					}
				}
				pending_idx = idx;
			}
		}
		if pending_idx == core::usize::MAX {
			#[cfg(any(test, fuzzing))]
			// If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
			// is simply a duplicate fail, not previously failed and we failed-back too early.
			debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
			return Ok(None);
		}
		if !self.context.channel_state.can_generate_new_commitment() {
			debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
			force_holding_cell = true;
		}
		// Now update local state:
		if force_holding_cell {
			for pending_update in self.context.holding_cell_htlc_updates.iter() {
				match pending_update {
					&HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
						if htlc_id_arg == htlc_id {
							#[cfg(any(test, fuzzing))]
							debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
							return Ok(None);
						}
					},
					&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
						&HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
					{
						if htlc_id_arg == htlc_id {
							debug_assert!(false, "Tried to fail an HTLC that was already failed");
							return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
						}
					},
					_ => {}
				}
			}
			log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
			self.context.holding_cell_htlc_updates.push(err_contents.to_htlc_update_awaiting_ack(htlc_id_arg));
			return Ok(None);
		}
		log_trace!(logger, "Failing HTLC ID {} back with {} message in channel {}.", htlc_id_arg,
			E::Message::name(), &self.context.channel_id());
		{
			let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
			htlc.state = err_contents.clone().to_inbound_htlc_state();
		}
		Ok(Some(err_contents.to_message(htlc_id_arg, self.context.channel_id())))
	}
3985 // Message handlers:
	/// Updates the state of the channel to indicate that all channels in the batch have received
	/// funding_signed and persisted their monitors.
	/// The funding transaction is consequently allowed to be broadcast, and the channel can be
	/// treated as a non-batch channel going forward.
	pub fn set_batch_ready(&mut self) {
		self.context.is_batch_funding = None;
		self.context.channel_state.clear_waiting_for_batch();
	}
3995 /// Unsets the existing funding information.
3997 /// This must only be used if the channel has not yet completed funding and has not been used.
3999 /// Further, the channel must be immediately shut down after this with a call to
4000 /// [`ChannelContext::force_shutdown`].
	pub fn unset_funding_info(&mut self, temporary_channel_id: ChannelId) {
		debug_assert!(matches!(
			self.context.channel_state, ChannelState::AwaitingChannelReady(_)
		));
		self.context.channel_transaction_parameters.funding_outpoint = None;
		self.context.channel_id = temporary_channel_id;
	}
	/// Handles a channel_ready message from our peer. If we've already sent our channel_ready
	/// and the channel is now usable (and public), this may generate an announcement_signatures to
	/// reply with.
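	///
	/// Illustrative sketch (not from the original source), assuming the arguments are wired up
	/// the same way `ChannelManager` wires its message handlers:
	/// ```ignore
	/// if let Some(announcement_sigs) = chan.channel_ready(
	/// 	&msg, &node_signer, chain_hash, &user_config, &best_block, logger,
	/// )? {
	/// 	// Send `announcement_sigs` back to the peer.
	/// }
	/// ```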
4012 pub fn channel_ready<NS: Deref, L: Deref>(
4013 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
4014 user_config: &UserConfig, best_block: &BestBlock, logger: &L
	) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		if self.context.channel_state.is_peer_disconnected() {
4021 self.context.workaround_lnd_bug_4006 = Some(msg.clone());
			return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
		}
		if let Some(scid_alias) = msg.short_channel_id_alias {
			if Some(scid_alias) != self.context.short_channel_id {
				// The scid alias provided can be used to route payments *from* our counterparty,
				// i.e. can be used for inbound payments and provided in invoices, but is not used
				// when routing outbound payments.
				self.context.latest_inbound_scid_alias = Some(scid_alias);
			}
		}
4034 // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
4035 // batch, but we can receive channel_ready messages.
4036 let mut check_reconnection = false;
4037 match &self.context.channel_state {
4038 ChannelState::AwaitingChannelReady(flags) => {
4039 let flags = flags.clone().clear(FundedStateFlags::ALL.into());
4040 debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
4041 if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
4042 // If we reconnected before sending our `channel_ready` they may still resend theirs.
4043 check_reconnection = true;
4044 } else if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
4045 self.context.channel_state.set_their_channel_ready();
4046 } else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
4047 self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
					self.context.update_time_counter += 1;
				} else {
					// We're in `WAITING_FOR_BATCH`, so we should wait until we're ready.
					debug_assert!(flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
				}
			},
			// If we reconnected before sending our `channel_ready` they may still resend theirs.
			ChannelState::ChannelReady(_) => check_reconnection = true,
			_ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
		}
4058 if check_reconnection {
4059 // They probably disconnected/reconnected and re-sent the channel_ready, which is
4060 // required, or they're sending a fresh SCID alias.
4061 let expected_point =
4062 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
					// If they haven't ever sent an updated point, the point they send should match
					// the current one.
					self.context.counterparty_cur_commitment_point
4066 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
4067 // If we've advanced the commitment number once, the second commitment point is
4068 // at `counterparty_prev_commitment_point`, which is not yet revoked.
4069 debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
					self.context.counterparty_prev_commitment_point
				} else {
					// If they have sent updated points, channel_ready is always supposed to match
4073 // their "first" point, which we re-derive here.
4074 Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
4075 &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
					).expect("We already advanced, so previous secret keys should have been validated already")))
				};
			if expected_point != Some(msg.next_per_commitment_point) {
				return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
			}
			return Ok(None);
		}
4084 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
4085 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
4087 log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
		Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
	}
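
	/// Handles an incoming `update_add_htlc` message, enforcing the spec-mandated acceptance
	/// checks (channel value, minimums, in-flight limits, reserves, dust exposure, and the fee
	/// spike buffer) before adding the HTLC to `pending_inbound_htlcs` as `RemoteAnnounced`.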
4092 pub fn update_add_htlc<F, FE: Deref, L: Deref>(
4093 &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
4094 create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
4095 ) -> Result<(), ChannelError>
4096 where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
		FE::Target: FeeEstimator, L::Target: Logger,
	{
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
		}
		// We can't accept HTLCs sent after we've sent a shutdown.
		if self.context.channel_state.is_local_shutdown_sent() {
			pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
		}
		// If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
		if self.context.channel_state.is_remote_shutdown_sent() {
			return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
		}
		if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
			return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
		}
		if msg.amount_msat == 0 {
			return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
		}
		if msg.amount_msat < self.context.holder_htlc_minimum_msat {
			return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
		}
		let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
		let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
		if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
			return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
		}
		if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
			return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
		}
		// Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
		// the reserve_satoshis we told them to always have as direct payment so that they lose
		// something if we punish them for broadcasting an old state).
		// Note that we don't really care about having a small/no to_remote output in our local
		// commitment transactions, as the purpose of the channel reserve is to ensure we can
		// punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
		// present in the next commitment transaction we send them (at least for fulfilled ones,
		// failed ones won't modify value_to_self).
		// Note that we will send HTLCs which another instance of rust-lightning would think
		// violate the reserve value if we do not do this (as we forget inbound HTLCs from the
		// Channel state once they will not be present in the next received commitment
		// transaction).
		let mut removed_outbound_total_msat = 0;
		for ref htlc in self.context.pending_outbound_htlcs.iter() {
			if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
				removed_outbound_total_msat += htlc.amount_msat;
			} else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
				removed_outbound_total_msat += htlc.amount_msat;
			}
		}
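
		// Dust HTLCs can't be claimed on-chain (their value would be eaten by fees), so their
		// full value is simply lost to miner fees if the channel closes while they are pending;
		// the checks below cap the total value we can lose this way. Rough illustrative numbers
		// (not from the original source): at a 2,500 sat/kW dust buffer feerate and a ~663 WU
		// non-anchor HTLC-timeout transaction, the timeout dust threshold is ~1,657 sats plus
		// the counterparty's dust limit, and any HTLC below it counts toward our dust exposure
		// in full.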
		let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
		let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			(0, 0)
		} else {
			let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
			(dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
				dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
		};
		let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
		if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
			let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
			if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
					on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		}
		let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
		if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
			let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
			if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
					on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		}
		let pending_value_to_self_msat =
			self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
		let pending_remote_value_msat =
			self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
		if pending_remote_value_msat < msg.amount_msat {
			return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
		}
		// Check that the remote can afford to pay for this HTLC on-chain at the current
		// feerate_per_kw, while maintaining their channel reserve (as required by the spec).
		{
			let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
				let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
				self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
			};
			let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
				ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
			} else {
				0
			};
			if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
				return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
			}
			if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
				return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
			}
		}

		let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
		} else {
			0
		};
		if !self.context.is_outbound() {
			// `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
			// the spec because the fee spike buffer requirement doesn't exist on the receiver's
			// side, only on the sender's. Note that with anchor outputs we are no longer as
			// sensitive to fee spikes, so the extra fee spike buffer multiple below is only
			// applied on non-anchor channels.
			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
			let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
			if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
				remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
			}
			if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
				// Note that if the pending_forward_status is not updated here, then it's because we're already failing
				// the HTLC, i.e. its status is already set to failing.
				log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		} else {
			// Check that they won't violate our local required channel reserve by adding this HTLC.
			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
			let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
			if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
				return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
			}
		}
		if self.context.next_counterparty_htlc_id != msg.htlc_id {
			return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
		}
		if msg.cltv_expiry >= 500000000 {
			return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
		}
		if self.context.channel_state.is_local_shutdown_sent() {
			if let PendingHTLCStatus::Forward(_) = pending_forward_status {
				panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
			}
		}
		// Now update local state:
		self.context.next_counterparty_htlc_id += 1;
		self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
			htlc_id: msg.htlc_id,
			amount_msat: msg.amount_msat,
			payment_hash: msg.payment_hash,
			cltv_expiry: msg.cltv_expiry,
			state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
		});
		Ok(())
	}
	/// Marks an outbound HTLC for which we have received an update_fail/fulfill/malformed
	/// message from our counterparty as removed, returning the HTLC so callers can read its source.
	fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
4266 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
4267 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
4268 if htlc.htlc_id == htlc_id {
4269 let outcome = match check_preimage {
4270 None => fail_reason.into(),
4271 Some(payment_preimage) => {
4272 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
						if payment_hash != htlc.payment_hash {
							return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
						}
						OutboundHTLCOutcome::Success(Some(payment_preimage))
					}
				};
				match htlc.state {
					OutboundHTLCState::LocalAnnounced(_) =>
						return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
					OutboundHTLCState::Committed => {
						htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
					},
					OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
						return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
				}
				return Ok(htlc);
			}
		}
		Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
	}
4294 pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64, Option<u64>), ChannelError> {
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
		}

		self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat, htlc.skimmed_fee_msat))
	}
4305 pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
		}

		self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
		Ok(())
	}
4317 pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
		}

		self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
		Ok(())
	}
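
	/// Handles an incoming `commitment_signed` message: verifies the commitment transaction and
	/// all HTLC signatures, advances the local HTLC state machines, and returns (or queues) a
	/// `ChannelMonitorUpdate` carrying the new holder commitment info.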
4329 pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
	where L::Target: Logger
	{
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
		}
		if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
			return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
		}
4342 let funding_script = self.context.get_funding_redeemscript();
4344 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
4346 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
4347 let commitment_txid = {
4348 let trusted_tx = commitment_stats.tx.trust();
4349 let bitcoin_tx = trusted_tx.built_transaction();
4350 let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
4352 log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
4353 log_bytes!(msg.signature.serialize_compact()[..]),
4354 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
4355 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
			if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
				return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
			}
			bitcoin_tx.txid
		};
4361 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
4363 // If our counterparty updated the channel fee in this commitment transaction, check that
4364 // they can actually afford the new fee now.
		let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
			update_state == FeeUpdateState::RemoteAnnounced
		} else { false };
		if update_fee {
			debug_assert!(!self.context.is_outbound());
			let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
			if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
				return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
			}
		}
		#[cfg(any(test, fuzzing))]
		{
			if self.context.is_outbound() {
				let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
				*self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
				if let Some(info) = projected_commit_tx_info {
					let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
						+ self.context.holding_cell_htlc_updates.len();
					if info.total_pending_htlcs == total_pending_htlcs
						&& info.next_holder_htlc_id == self.context.next_holder_htlc_id
						&& info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
						&& info.feerate == self.context.feerate_per_kw {
							assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
					}
				}
			}
		}
		if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
			return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
		}
4397 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
4398 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
4399 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
4400 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
4401 // backwards compatibility, we never use it in production. To provide test coverage, here,
4402 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
4403 #[allow(unused_assignments, unused_mut)]
4404 let mut separate_nondust_htlc_sources = false;
		#[cfg(all(feature = "std", any(test, fuzzing)))] {
			use core::hash::{BuildHasher, Hasher};
			// Get a random value using the only std API to do so - the DefaultHasher
			let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
			separate_nondust_htlc_sources = rand_val % 2 == 0;
		}
4412 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
4413 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
4414 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
4415 if let Some(_) = htlc.transaction_output_index {
4416 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
4417 self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
4418 &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
4420 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
4421 let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
4422 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
4423 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
4424 log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
4425 encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
				if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
					return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
				}
				if !separate_nondust_htlc_sources {
					htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
				}
			} else {
				htlcs_and_sigs.push((htlc, None, source_opt.take()));
			}
			if separate_nondust_htlc_sources {
				if let Some(source) = source_opt.take() {
					nondust_htlc_sources.push(source);
				}
			}
			debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
		}
		let holder_commitment_tx = HolderCommitmentTransaction::new(
			commitment_stats.tx,
			msg.signature,
			msg.htlc_signatures.clone(),
			&self.context.get_holder_pubkeys().funding_pubkey,
			self.context.counterparty_funding_pubkey()
		);
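
		// Ask the signer to sanity-check the new holder commitment (including the preimages for
		// any outbound HTLCs being claimed in it) before we adopt it as our latest valid state.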
4451 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
4452 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
4454 // Update state now that we've passed all the can-fail calls...
4455 let mut need_commitment = false;
		if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
			if *update_state == FeeUpdateState::RemoteAnnounced {
				*update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
				need_commitment = true;
			}
		}
4463 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
			let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
				Some(forward_info.clone())
			} else { None };
4467 if let Some(forward_info) = new_forward {
4468 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
4469 &htlc.payment_hash, &self.context.channel_id);
				htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
				need_commitment = true;
			}
		}
4474 let mut claimed_htlcs = Vec::new();
4475 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
4476 if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
4477 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
4478 &htlc.payment_hash, &self.context.channel_id);
4479 // Grab the preimage, if it exists, instead of cloning
4480 let mut reason = OutboundHTLCOutcome::Success(None);
4481 mem::swap(outcome, &mut reason);
				if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
					// If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
					// upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
					// have a `Success(None)` reason. In this case we could forget some HTLC
					// claims, but such an upgrade is unlikely and including claimed HTLCs here
					// fixes a bug which the user was exposed to on 0.0.104 when they started the
					// claim.
					claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
				}
				htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
				need_commitment = true;
			}
		}
4496 self.context.latest_monitor_update_id += 1;
4497 let mut monitor_update = ChannelMonitorUpdate {
4498 update_id: self.context.latest_monitor_update_id,
4499 counterparty_node_id: Some(self.context.counterparty_node_id),
			updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
				commitment_tx: holder_commitment_tx,
				htlc_outputs: htlcs_and_sigs,
				claimed_htlcs,
				nondust_htlc_sources,
			}],
			channel_id: Some(self.context.channel_id()),
		};
4509 self.context.cur_holder_commitment_transaction_number -= 1;
4510 self.context.expecting_peer_commitment_signed = false;
4511 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
4512 // build_commitment_no_status_check() next which will reset this to RAAFirst.
4513 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
4515 if self.context.channel_state.is_monitor_update_in_progress() {
4516 // In case we initially failed monitor updating without requiring a response, we need
4517 // to make sure the RAA gets sent first.
4518 self.context.monitor_pending_revoke_and_ack = true;
4519 if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
4520 // If we were going to send a commitment_signed after the RAA, go ahead and do all
4521 // the corresponding HTLC status updates so that
4522 // get_last_commitment_update_for_send includes the right HTLCs.
4523 self.context.monitor_pending_commitment_signed = true;
4524 let mut additional_update = self.build_commitment_no_status_check(logger);
4525 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
4526 // strictly increasing by one, so decrement it here.
4527 self.context.latest_monitor_update_id = monitor_update.update_id;
4528 monitor_update.updates.append(&mut additional_update.updates);
			log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
				&self.context.channel_id);
			return Ok(self.push_ret_blockable_mon_update(monitor_update));
		}
4535 let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
4536 // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
4537 // we'll send one right away when we get the revoke_and_ack when we
4538 // free_holding_cell_htlcs().
4539 let mut additional_update = self.build_commitment_no_status_check(logger);
4540 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
4541 // strictly increasing by one, so decrement it here.
			self.context.latest_monitor_update_id = monitor_update.update_id;
			monitor_update.updates.append(&mut additional_update.updates);
			true
		} else { false };
		log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
			&self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
		self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
		return Ok(self.push_ret_blockable_mon_update(monitor_update));
	}
4553 /// Public version of the below, checking relevant preconditions first.
4554 /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
4555 /// returns `(None, Vec::new())`.
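	///
	/// Illustrative sketch (not from the original source):
	/// ```ignore
	/// let (monitor_update_opt, failed_htlcs) = chan.maybe_free_holding_cell_htlcs(&fee_estimator, logger);
	/// for (source, payment_hash) in failed_htlcs {
	/// 	// Fail these HTLCs back upstream.
	/// }
	/// ```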
4556 pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
4557 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
4558 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
	where F::Target: FeeEstimator, L::Target: Logger
	{
		if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.channel_state.can_generate_new_commitment() {
4562 self.free_holding_cell_htlcs(fee_estimator, logger)
		} else { (None, Vec::new()) }
	}
4566 /// Frees any pending commitment updates in the holding cell, generating the relevant messages
4567 /// for our counterparty.
4568 fn free_holding_cell_htlcs<F: Deref, L: Deref>(
4569 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
4570 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
	where F::Target: FeeEstimator, L::Target: Logger
	{
		assert!(!self.context.channel_state.is_monitor_update_in_progress());
		if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
4575 log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
4576 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
4578 let mut monitor_update = ChannelMonitorUpdate {
4579 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
4580 counterparty_node_id: Some(self.context.counterparty_node_id),
4581 updates: Vec::new(),
				channel_id: Some(self.context.channel_id()),
			};
4585 let mut htlc_updates = Vec::new();
4586 mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
4587 let mut update_add_count = 0;
4588 let mut update_fulfill_count = 0;
4589 let mut update_fail_count = 0;
4590 let mut htlcs_to_fail = Vec::new();
4591 for htlc_update in htlc_updates.drain(..) {
4592 // Note that this *can* fail, though it should be due to rather-rare conditions on
4593 // fee races with adding too many outputs which push our total payments just over
4594 // the limit. In case it's less rare than I anticipate, we may want to revisit
4595 // handling this case better and maybe fulfilling some of the HTLCs while attempting
4596 // to rebalance channels.
				let fail_htlc_res = match &htlc_update {
					&HTLCUpdateAwaitingACK::AddHTLC {
						amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
						skimmed_fee_msat, blinding_point, ..
					} => {
						match self.send_htlc(
							amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
							false, skimmed_fee_msat, blinding_point, fee_estimator, logger
						) {
							Ok(_) => update_add_count += 1,
							Err(e) => {
								match e {
									ChannelError::Ignore(ref msg) => {
										log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
										// If we fail to send here, then this HTLC should
										// be failed backwards. Failing to send here
										// indicates that this HTLC may keep being put back
										// into the holding cell without ever being
										// successfully forwarded/failed/fulfilled, causing
										// our counterparty to eventually close on us.
										htlcs_to_fail.push((source.clone(), *payment_hash));
									},
									_ => {
										panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
									},
								}
							}
						}
						None
					},
					&HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
						// If an HTLC claim was previously added to the holding cell (via
						// `get_update_fulfill_htlc`), then generating the claim message itself must
						// not fail - any in between attempts to claim the HTLC will have resulted
						// in it hitting the holding cell again and we cannot change the state of a
						// holding cell HTLC from fulfill to anything else.
						let mut additional_monitor_update =
							if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
								self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
							{ monitor_update } else { unreachable!() };
						update_fulfill_count += 1;
						monitor_update.updates.append(&mut additional_monitor_update.updates);
						None
					},
					&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
						Some(self.fail_htlc(htlc_id, err_packet.clone(), false, logger)
							.map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
					},
					&HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
						Some(self.fail_htlc(htlc_id, (sha256_of_onion, failure_code), false, logger)
							.map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
					}
				};
				if let Some(res) = fail_htlc_res {
					match res {
						Ok(fail_msg_opt) => {
							// If an HTLC failure was previously added to the holding cell (via
							// `queue_fail_{malformed_}htlc`) then generating the fail message itself must
							// not fail - we should never end up in a state where we double-fail
							// an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
							// for a full revocation before failing.
							debug_assert!(fail_msg_opt.is_some());
							update_fail_count += 1;
						},
						Err(ChannelError::Ignore(_)) => {},
						Err(_) => {
							panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
						},
					}
				}
			}
			if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
				return (None, htlcs_to_fail);
			}
			let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
				self.send_update_fee(feerate, false, fee_estimator, logger)
			} else {
				None
			};
4677 let mut additional_update = self.build_commitment_no_status_check(logger);
4678 // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
4679 // but we want them to be strictly increasing by one, so reset it here.
4680 self.context.latest_monitor_update_id = monitor_update.update_id;
4681 monitor_update.updates.append(&mut additional_update.updates);
4683 log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
4684 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
4685 update_add_count, update_fulfill_count, update_fail_count);
			self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
			(self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
		} else {
			(None, Vec::new())
		}
	}
4694 /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
4695 /// commitment_signed message here in case we had pending outbound HTLCs to add which were
4696 /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
4697 /// generating an appropriate error *after* the channel state has been updated based on the
4698 /// revoke_and_ack message.
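	///
	/// Illustrative sketch (not from the original source):
	/// ```ignore
	/// let (htlcs_to_fail, monitor_update_opt) =
	/// 	chan.revoke_and_ack(&msg, &fee_estimator, logger, false)?;
	/// ```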
4699 pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
4700 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
4701 ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger,
	{
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
		}
		if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
			return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
		}
4714 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
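
		// The revealed secret must correspond to the per-commitment point for the state being
		// revoked; otherwise the peer is revoking something other than what we expect.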
4716 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
			if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
				return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
			}
		}
4722 if !self.context.channel_state.is_awaiting_remote_revoke() {
4723 // Our counterparty seems to have burned their coins to us (by revoking a state when we
4724 // haven't given them a new commitment transaction to broadcast). We should probably
4725 // take advantage of this by updating our channel monitor, sending them an error, and
4726 // waiting for them to broadcast their latest (now-revoked claim). But, that would be a
4727 // lot of work, and there's some chance this is all a misunderstanding anyway.
4728 // We have to do *something*, though, since our signer may get mad at us for otherwise
			// jumping a remote commitment number, so best to just force-close and move on.
			return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
		}
		#[cfg(any(test, fuzzing))]
		{
			*self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
			*self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
		}
		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				ecdsa.validate_counterparty_revocation(
					self.context.cur_counterparty_commitment_transaction_number + 1,
					&secret
				).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
			},
			// TODO (taproot|arik)
			#[cfg(taproot)]
			_ => todo!()
		}
4751 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
4752 .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
4753 self.context.latest_monitor_update_id += 1;
4754 let mut monitor_update = ChannelMonitorUpdate {
4755 update_id: self.context.latest_monitor_update_id,
4756 counterparty_node_id: Some(self.context.counterparty_node_id),
4757 updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
4758 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
				secret: msg.per_commitment_secret,
			}],
			channel_id: Some(self.context.channel_id()),
		};
		// Update state now that we've passed all the can-fail calls...
		// (Note that we may still fail to generate the new commitment_signed message, but that's
		// OK: we step the channel here and *then*, if the new generation fails, we can fail the
		// channel based on that, but stepping stuff here should be safe either way.)
4768 self.context.channel_state.clear_awaiting_remote_revoke();
4769 self.context.sent_message_awaiting_response = None;
4770 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
4771 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
4772 self.context.cur_counterparty_commitment_transaction_number -= 1;
		if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
			self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
		}
4778 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
4779 let mut to_forward_infos = Vec::new();
4780 let mut revoked_htlcs = Vec::new();
4781 let mut finalized_claimed_htlcs = Vec::new();
4782 let mut update_fail_htlcs = Vec::new();
4783 let mut update_fail_malformed_htlcs = Vec::new();
4784 let mut require_commitment = false;
4785 let mut value_to_self_msat_diff: i64 = 0;
		{
			// Take references explicitly so that we can hold multiple references to self.context.
4789 let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
4790 let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
4791 let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;
			// We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
4794 pending_inbound_htlcs.retain(|htlc| {
4795 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
4796 log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
					if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
						value_to_self_msat_diff += htlc.amount_msat as i64;
					}
					*expecting_peer_commitment_signed = true;
					false
				} else { true }
			});
4804 pending_outbound_htlcs.retain(|htlc| {
4805 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
4806 log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
					if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
						revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
					} else {
						finalized_claimed_htlcs.push(htlc.source.clone());
						// They fulfilled, so we sent them money
						value_to_self_msat_diff -= htlc.amount_msat as i64;
					}
					false
				} else { true }
			});
4817 for htlc in pending_inbound_htlcs.iter_mut() {
				let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
					true
				} else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
					true
				} else { false };
				if swap {
					let mut state = InboundHTLCState::Committed;
4825 mem::swap(&mut state, &mut htlc.state);
4827 if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
4828 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
4829 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
4830 require_commitment = true;
4831 } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
4832 match forward_info {
4833 PendingHTLCStatus::Fail(fail_msg) => {
4834 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
								require_commitment = true;
								match fail_msg {
									HTLCFailureMsg::Relay(msg) => {
4838 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
										update_fail_htlcs.push(msg)
									},
4841 HTLCFailureMsg::Malformed(msg) => {
4842 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
										update_fail_malformed_htlcs.push(msg)
									},
								}
							},
							PendingHTLCStatus::Forward(forward_info) => {
								log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
								to_forward_infos.push((forward_info, htlc.htlc_id));
								htlc.state = InboundHTLCState::Committed;
							}
						}
					}
				}
			}
4856 for htlc in pending_outbound_htlcs.iter_mut() {
4857 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
4858 log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
					htlc.state = OutboundHTLCState::Committed;
					*expecting_peer_commitment_signed = true;
				}
4862 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
4863 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
4864 // Grab the preimage, if it exists, instead of cloning
4865 let mut reason = OutboundHTLCOutcome::Success(None);
4866 mem::swap(outcome, &mut reason);
4867 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
4868 require_commitment = true;
4872 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
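// The line above applies `value_to_self_msat_diff` as a signed delta: fulfilled inbound
// HTLCs (funds we claimed) were added to it while fulfilled outbound HTLCs (funds we paid
// out) were subtracted, so one signed addition settles both directions at once.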
4874 if let Some((feerate, update_state)) = self.context.pending_update_fee {
4875 match update_state {
4876 FeeUpdateState::Outbound => {
4877 debug_assert!(self.context.is_outbound());
4878 log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
4879 self.context.feerate_per_kw = feerate;
4880 self.context.pending_update_fee = None;
4881 self.context.expecting_peer_commitment_signed = true;
4883 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
4884 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
4885 debug_assert!(!self.context.is_outbound());
4886 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
4887 require_commitment = true;
4888 self.context.feerate_per_kw = feerate;
4889 self.context.pending_update_fee = None;
4894 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
4895 let release_state_str =
4896 if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
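// We hand the monitor update straight back to the caller only when nothing earlier is
// still blocked and the caller didn't ask us to hold it; otherwise the macro below queues
// it in `blocked_monitor_updates` to preserve update ordering.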
4897 macro_rules! return_with_htlcs_to_fail {
4898 ($htlcs_to_fail: expr) => {
4899 if !release_monitor {
4900 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
4901 update: monitor_update,
4903 return Ok(($htlcs_to_fail, None));
4905 return Ok(($htlcs_to_fail, Some(monitor_update)));
4910 if self.context.channel_state.is_monitor_update_in_progress() {
4911 // We can't actually generate a new commitment transaction (incl by freeing holding
4912 // cells) while we can't update the monitor, so we just return what we have.
4913 if require_commitment {
4914 self.context.monitor_pending_commitment_signed = true;
4915 // When the monitor updating is restored we'll call
4916 // get_last_commitment_update_for_send(), which does not update state, but we're
4917 // definitely now awaiting a remote revoke before we can step forward any more, so
4918 // set it here.
4919 let mut additional_update = self.build_commitment_no_status_check(logger);
4920 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
4921 // strictly increasing by one, so decrement it here.
4922 self.context.latest_monitor_update_id = monitor_update.update_id;
4923 monitor_update.updates.append(&mut additional_update.updates);
4925 self.context.monitor_pending_forwards.append(&mut to_forward_infos);
4926 self.context.monitor_pending_failures.append(&mut revoked_htlcs);
4927 self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
4928 log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
4929 return_with_htlcs_to_fail!(Vec::new());
4932 match self.free_holding_cell_htlcs(fee_estimator, logger) {
4933 (Some(mut additional_update), htlcs_to_fail) => {
4934 // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
4935 // strictly increasing by one, so decrement it here.
4936 self.context.latest_monitor_update_id = monitor_update.update_id;
4937 monitor_update.updates.append(&mut additional_update.updates);
4939 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
4940 &self.context.channel_id(), release_state_str);
4942 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
4943 return_with_htlcs_to_fail!(htlcs_to_fail);
4945 (None, htlcs_to_fail) => {
4946 if require_commitment {
4947 let mut additional_update = self.build_commitment_no_status_check(logger);
4949 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
4950 // strictly increasing by one, so decrement it here.
4951 self.context.latest_monitor_update_id = monitor_update.update_id;
4952 monitor_update.updates.append(&mut additional_update.updates);
4954 log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
4955 &self.context.channel_id(),
4956 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
4957 release_state_str);
4959 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
4960 return_with_htlcs_to_fail!(htlcs_to_fail);
4962 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
4963 &self.context.channel_id(), release_state_str);
4965 self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
4966 return_with_htlcs_to_fail!(htlcs_to_fail);
4972 /// Queues up an outbound update fee by placing it in the holding cell. You should call
4973 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
4974 /// commitment update.
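///
/// A minimal usage sketch (hypothetical surrounding bindings; `channel`, `fee_est` and
/// `logger` are assumed to exist in the caller, and the feerate value is illustrative):
///
/// ```ignore
/// // Queue an update to 2530 sat/kW. No message is produced here; the update sits in
/// // the holding cell until maybe_free_holding_cell_htlcs builds the commitment update.
/// channel.queue_update_fee(2530, &fee_est, &logger);
/// ```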
4975 pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
4976 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4977 where F::Target: FeeEstimator, L::Target: Logger
4979 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
4980 assert!(msg_opt.is_none(), "We forced holding cell?");
4983 /// Adds a pending update to this channel. See the doc for send_htlc for
4984 /// further details on when the return value is `Some` versus `None`.
4985 /// If our balance is too low to cover the cost of the next commitment transaction at the
4986 /// new feerate, the update is cancelled.
4988 /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
4989 /// [`Channel`] if `force_holding_cell` is false.
4990 fn send_update_fee<F: Deref, L: Deref>(
4991 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
4992 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
4993 ) -> Option<msgs::UpdateFee>
4994 where F::Target: FeeEstimator, L::Target: Logger
4996 if !self.context.is_outbound() {
4997 panic!("Cannot send fee from inbound channel");
4999 if !self.context.is_usable() {
5000 panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
5002 if !self.context.is_live() {
5003 panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
5006 // Before proposing a feerate update, check that we can actually afford the new fee.
5007 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
5008 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
5009 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
5010 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
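// Illustrative arithmetic (hypothetical numbers, non-anchor weights): at 2500 sat/kW, with
// a 724-weight commitment transaction, 172 weight per non-dust HTLC, two committed HTLCs
// and a two-HTLC buffer, the buffered fee is (724 + 4 * 172) * 2500 / 1000 = 3530 sats,
// i.e. 3_530_000 msat, which must fit in our balance on top of the reserve.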
5011 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
5012 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
5013 if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
5014 //TODO: auto-close after a number of failures?
5015 log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
5016 return None;
5017 }
5019 // Note that we evaluate the pending HTLCs' "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
5020 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
5021 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
5022 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
5023 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
5024 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
5025 return None;
5026 }
5027 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
5028 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
5029 return None;
5030 }
5032 if self.context.channel_state.is_awaiting_remote_revoke() || self.context.channel_state.is_monitor_update_in_progress() {
5033 force_holding_cell = true;
5036 if force_holding_cell {
5037 self.context.holding_cell_update_fee = Some(feerate_per_kw);
5038 return None;
5039 }
5041 debug_assert!(self.context.pending_update_fee.is_none());
5042 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
5044 Some(msgs::UpdateFee {
5045 channel_id: self.context.channel_id,
5046 feerate_per_kw,
5047 })
5050 /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
5051 /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
5052 /// resent.
5053 /// No further message handling calls may be made until a channel_reestablish dance has
5054 /// completed.
5055 /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
5056 pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
5057 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
5058 if self.context.channel_state.is_pre_funded_state() {
5059 return Err(());
5060 }
5062 if self.context.channel_state.is_peer_disconnected() {
5063 // While the below code should be idempotent, it's simpler to just return early, as
5064 // redundant disconnect events can fire, though they should be rare.
5065 return Ok(());
5066 }
5068 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
5069 self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
5072 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
5073 // will be retransmitted.
5074 self.context.last_sent_closing_fee = None;
5075 self.context.pending_counterparty_closing_signed = None;
5076 self.context.closing_fee_limits = None;
5078 let mut inbound_drop_count = 0;
5079 self.context.pending_inbound_htlcs.retain(|htlc| {
5080 match htlc.state {
5081 InboundHTLCState::RemoteAnnounced(_) => {
5082 // They sent us an update_add_htlc but we never got the commitment_signed.
5083 // We'll tell them what commitment_signed we're expecting next and they'll drop
5084 // this HTLC accordingly
5085 inbound_drop_count += 1;
5086 false
5087 },
5088 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
5089 // We received a commitment_signed updating this HTLC and (at least hopefully)
5090 // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
5091 // in response to it yet, so don't touch it.
5092 true
5093 },
5094 InboundHTLCState::Committed => true,
5095 InboundHTLCState::LocalRemoved(_) => {
5096 // We (hopefully) sent a commitment_signed updating this HTLC (which we can
5097 // re-transmit if needed) and they may have even sent a revoke_and_ack back
5098 // (that we missed). Keep this around for now and if they tell us they missed
5099 // the commitment_signed we can re-transmit the update then.
5100 true
5101 },
5102 }
5103 });
5104 self.context.next_counterparty_htlc_id -= inbound_drop_count;
5106 if let Some((_, update_state)) = self.context.pending_update_fee {
5107 if update_state == FeeUpdateState::RemoteAnnounced {
5108 debug_assert!(!self.context.is_outbound());
5109 self.context.pending_update_fee = None;
5113 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
5114 if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
5115 // They sent us an update to remove this but haven't yet sent the corresponding
5116 // commitment_signed, we need to move it back to Committed and they can re-send
5117 // the update upon reconnection.
5118 htlc.state = OutboundHTLCState::Committed;
5122 self.context.sent_message_awaiting_response = None;
5124 self.context.channel_state.set_peer_disconnected();
5125 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
5129 /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
5130 /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
5131 /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
5132 /// update completes (potentially immediately).
5133 /// The messages which were generated with the monitor update must *not* have been sent to the
5134 /// remote end, and must instead have been dropped. They will be regenerated when
5135 /// [`Self::monitor_updating_restored`] is called.
5137 /// [`ChannelManager`]: super::channelmanager::ChannelManager
5138 /// [`chain::Watch`]: crate::chain::Watch
5139 /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
5140 fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
5141 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
5142 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
5143 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
5145 self.context.monitor_pending_revoke_and_ack |= resend_raa;
5146 self.context.monitor_pending_commitment_signed |= resend_commitment;
5147 self.context.monitor_pending_channel_ready |= resend_channel_ready;
5148 self.context.monitor_pending_forwards.append(&mut pending_forwards);
5149 self.context.monitor_pending_failures.append(&mut pending_fails);
5150 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
5151 self.context.channel_state.set_monitor_update_in_progress();
5154 /// Indicates that the latest ChannelMonitor update has been committed by the client
5155 /// successfully and we should restore normal operation. Returns messages which should be sent
5156 /// to the remote side.
5157 pub fn monitor_updating_restored<L: Deref, NS: Deref>(
5158 &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
5159 user_config: &UserConfig, best_block_height: u32
5160 ) -> MonitorRestoreUpdates
5163 NS::Target: NodeSigner
5165 assert!(self.context.channel_state.is_monitor_update_in_progress());
5166 self.context.channel_state.clear_monitor_update_in_progress();
5168 // If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to
5169 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
5170 // first received the funding_signed.
5171 let mut funding_broadcastable =
5172 if self.context.is_outbound() &&
5173 (matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
5174 matches!(self.context.channel_state, ChannelState::ChannelReady(_)))
5176 self.context.funding_transaction.take()
5178 // That said, if the funding transaction is already confirmed (ie we're active with a
5179 // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
5180 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.minimum_depth != Some(0) {
5181 funding_broadcastable = None;
5184 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
5185 // (and we assume the user never directly broadcasts the funding transaction and waits for
5186 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
5187 // * an inbound channel that failed to persist the monitor on funding_created and we got
5188 // the funding transaction confirmed before the monitor was persisted, or
5189 // * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
5190 let channel_ready = if self.context.monitor_pending_channel_ready {
5191 assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
5192 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
5193 self.context.monitor_pending_channel_ready = false;
5194 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
5195 Some(msgs::ChannelReady {
5196 channel_id: self.context.channel_id(),
5197 next_per_commitment_point,
5198 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5202 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
5204 let mut accepted_htlcs = Vec::new();
5205 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
5206 let mut failed_htlcs = Vec::new();
5207 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
5208 let mut finalized_claimed_htlcs = Vec::new();
5209 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
5211 if self.context.channel_state.is_peer_disconnected() {
5212 self.context.monitor_pending_revoke_and_ack = false;
5213 self.context.monitor_pending_commitment_signed = false;
5214 return MonitorRestoreUpdates {
5215 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
5216 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
5220 let raa = if self.context.monitor_pending_revoke_and_ack {
5221 Some(self.get_last_revoke_and_ack())
5222 } else { None };
5223 let commitment_update = if self.context.monitor_pending_commitment_signed {
5224 self.get_last_commitment_update_for_send(logger).ok()
5225 } else { None };
5226 if commitment_update.is_some() {
5227 self.mark_awaiting_response();
5230 self.context.monitor_pending_revoke_and_ack = false;
5231 self.context.monitor_pending_commitment_signed = false;
5232 let order = self.context.resend_order.clone();
5233 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
5234 &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
5235 if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
5236 match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
5237 MonitorRestoreUpdates {
5238 raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
5242 pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
5243 where F::Target: FeeEstimator, L::Target: Logger
5245 if self.context.is_outbound() {
5246 return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
5248 if self.context.channel_state.is_peer_disconnected() {
5249 return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
5251 Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
5253 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
5254 self.context.update_time_counter += 1;
5255 // Check that we won't be pushed over our dust exposure limit by the feerate increase.
5256 if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
5257 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
5258 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
5259 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
5260 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
5261 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
5262 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
5263 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
5264 msg.feerate_per_kw, holder_tx_dust_exposure)));
5266 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
5267 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
5268 msg.feerate_per_kw, counterparty_tx_dust_exposure)));
5274 /// Indicates that the signer may have some signatures for us, so we should retry if we're
5275 /// blocked.
5276 #[cfg(async_signing)]
5277 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
5278 let commitment_update = if self.context.signer_pending_commitment_update {
5279 self.get_last_commitment_update_for_send(logger).ok()
5280 } else { None };
5281 let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
5282 self.context.get_funding_signed_msg(logger).1
5283 } else { None };
5284 let channel_ready = if funding_signed.is_some() {
5285 self.check_get_channel_ready(0)
5286 } else { None };
5288 log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
5289 if commitment_update.is_some() { "a" } else { "no" },
5290 if funding_signed.is_some() { "a" } else { "no" },
5291 if channel_ready.is_some() { "a" } else { "no" });
5293 SignerResumeUpdates {
5294 commitment_update,
5295 funding_signed,
5296 channel_ready,
5297 }
5298 }
5300 fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
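// Re-derives the revoke_and_ack we last sent. Commitment numbers count down from
// INITIAL_COMMITMENT_NUMBER, hence the `+ 2` below: the secret we release is for the
// state two steps behind the one whose point we hand over as next_per_commitment_point.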
5301 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
5302 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
5303 msgs::RevokeAndACK {
5304 channel_id: self.context.channel_id,
5305 per_commitment_secret,
5306 next_per_commitment_point,
5307 #[cfg(taproot)]
5308 next_local_nonce: None,
5312 /// Gets the last commitment update for immediate sending to our peer.
5313 fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
5314 let mut update_add_htlcs = Vec::new();
5315 let mut update_fulfill_htlcs = Vec::new();
5316 let mut update_fail_htlcs = Vec::new();
5317 let mut update_fail_malformed_htlcs = Vec::new();
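// Rebuild, from our pending HTLC state, every update_* message the counterparty may have
// lost: update_add for HTLCs we announced but they never committed, and fail/fulfill
// messages for inbound HTLCs we removed whose removal they may not have seen.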
5319 for htlc in self.context.pending_outbound_htlcs.iter() {
5320 if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
5321 update_add_htlcs.push(msgs::UpdateAddHTLC {
5322 channel_id: self.context.channel_id(),
5323 htlc_id: htlc.htlc_id,
5324 amount_msat: htlc.amount_msat,
5325 payment_hash: htlc.payment_hash,
5326 cltv_expiry: htlc.cltv_expiry,
5327 onion_routing_packet: (**onion_packet).clone(),
5328 skimmed_fee_msat: htlc.skimmed_fee_msat,
5329 blinding_point: htlc.blinding_point,
5334 for htlc in self.context.pending_inbound_htlcs.iter() {
5335 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
5337 &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
5338 update_fail_htlcs.push(msgs::UpdateFailHTLC {
5339 channel_id: self.context.channel_id(),
5340 htlc_id: htlc.htlc_id,
5341 reason: err_packet.clone()
5344 &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
5345 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
5346 channel_id: self.context.channel_id(),
5347 htlc_id: htlc.htlc_id,
5348 sha256_of_onion: sha256_of_onion.clone(),
5349 failure_code: failure_code.clone(),
5352 &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
5353 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
5354 channel_id: self.context.channel_id(),
5355 htlc_id: htlc.htlc_id,
5356 payment_preimage: payment_preimage.clone(),
5363 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
5364 Some(msgs::UpdateFee {
5365 channel_id: self.context.channel_id(),
5366 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
5370 log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
5371 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
5372 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
5373 let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
5374 if self.context.signer_pending_commitment_update {
5375 log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
5376 self.context.signer_pending_commitment_update = false;
5380 #[cfg(not(async_signing))] {
5381 panic!("Failed to get signature for new commitment state");
5383 #[cfg(async_signing)] {
5384 if !self.context.signer_pending_commitment_update {
5385 log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
5386 self.context.signer_pending_commitment_update = true;
5391 Ok(msgs::CommitmentUpdate {
5392 update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
5393 commitment_signed,
5394 })
5395 }
5397 /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
5398 pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
5399 if self.context.channel_state.is_local_shutdown_sent() {
5400 assert!(self.context.shutdown_scriptpubkey.is_some());
5401 Some(msgs::Shutdown {
5402 channel_id: self.context.channel_id,
5403 scriptpubkey: self.get_closing_scriptpubkey(),
5408 /// May panic if some calls other than message-handling calls (which will all Err immediately)
5409 /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
5411 /// Some links printed in log lines are included here to check them during build (when run with
5412 /// `cargo doc --document-private-items`):
5413 /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
5414 /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
5415 pub fn channel_reestablish<L: Deref, NS: Deref>(
5416 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
5417 chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
5418 ) -> Result<ReestablishResponses, ChannelError>
5421 NS::Target: NodeSigner
5423 if !self.context.channel_state.is_peer_disconnected() {
5424 // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
5425 // almost certainly indicates we are going to end up out-of-sync in some way, so we
5426 // just close here instead of trying to recover.
5427 return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
5430 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
5431 msg.next_local_commitment_number == 0 {
5432 return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
5435 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
5436 if msg.next_remote_commitment_number > 0 {
5437 let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
5438 let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
5439 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
5440 if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
5441 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
5443 if msg.next_remote_commitment_number > our_commitment_transaction {
5444 macro_rules! log_and_panic {
5445 ($err_msg: expr) => {
5446 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
5447 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
5450 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
5451 This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
5452 More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
5453 If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
5454 ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
5455 ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
5456 Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
5457 See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
5461 // Before we change the state of the channel, we check if the peer is sending a very old
5462 // commitment transaction number; if so, we send a warning message.
5463 if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
5464 return Err(ChannelError::Warn(format!(
5465 "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
5466 msg.next_remote_commitment_number,
5467 our_commitment_transaction
5471 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
5472 // remaining cases either succeed or ErrorMessage-fail).
5473 self.context.channel_state.clear_peer_disconnected();
5474 self.context.sent_message_awaiting_response = None;
5476 let shutdown_msg = self.get_outbound_shutdown();
5478 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
5480 if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
5481 // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
5482 if !self.context.channel_state.is_our_channel_ready() ||
5483 self.context.channel_state.is_monitor_update_in_progress() {
5484 if msg.next_remote_commitment_number != 0 {
5485 return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
5487 // Short circuit the whole handler as there is nothing we can resend them
5488 return Ok(ReestablishResponses {
5489 channel_ready: None,
5490 raa: None, commitment_update: None,
5491 order: RAACommitmentOrder::CommitmentFirst,
5492 shutdown_msg, announcement_sigs,
5496 // We have OurChannelReady set!
5497 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
5498 return Ok(ReestablishResponses {
5499 channel_ready: Some(msgs::ChannelReady {
5500 channel_id: self.context.channel_id(),
5501 next_per_commitment_point,
5502 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5504 raa: None, commitment_update: None,
5505 order: RAACommitmentOrder::CommitmentFirst,
5506 shutdown_msg, announcement_sigs,
5510 let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
5511 // Remote isn't waiting on any RevokeAndACK from us!
5512 // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
5513 None
5514 } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
5515 if self.context.channel_state.is_monitor_update_in_progress() {
5516 self.context.monitor_pending_revoke_and_ack = true;
5517 None
5518 } else {
5519 Some(self.get_last_revoke_and_ack())
5522 debug_assert!(false, "All values should have been handled in the four cases above");
5523 return Err(ChannelError::Close(format!(
5524 "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
5525 msg.next_remote_commitment_number,
5526 our_commitment_transaction
5530 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
5531 // revoke_and_ack, not on sending commitment_signed, so we add one if we have
5532 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
5533 // the corresponding revoke_and_ack back yet.
5534 let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke();
5535 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
5536 self.mark_awaiting_response();
5538 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
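// Illustrative example (hypothetical numbers): if we've seen 5 counterparty commitments
// (so INITIAL_COMMITMENT_NUMBER - cur_counterparty_commitment_transaction_number == 5) and
// a commitment_signed of ours is still awaiting its revoke_and_ack, the counterparty's
// next_local_commitment_number should be 6, not 5.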
5540 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
5541 // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
5542 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
5543 Some(msgs::ChannelReady {
5544 channel_id: self.context.channel_id(),
5545 next_per_commitment_point,
5546 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5550 if msg.next_local_commitment_number == next_counterparty_commitment_number {
5551 if required_revoke.is_some() {
5552 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
5554 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
5557 Ok(ReestablishResponses {
5558 channel_ready, shutdown_msg, announcement_sigs,
5559 raa: required_revoke,
5560 commitment_update: None,
5561 order: self.context.resend_order.clone(),
5563 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
5564 if required_revoke.is_some() {
5565 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
5567 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
5570 if self.context.channel_state.is_monitor_update_in_progress() {
5571 self.context.monitor_pending_commitment_signed = true;
5572 Ok(ReestablishResponses {
5573 channel_ready, shutdown_msg, announcement_sigs,
5574 commitment_update: None, raa: None,
5575 order: self.context.resend_order.clone(),
5578 Ok(ReestablishResponses {
5579 channel_ready, shutdown_msg, announcement_sigs,
5580 raa: required_revoke,
5581 commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
5582 order: self.context.resend_order.clone(),
5585 } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
5586 Err(ChannelError::Close(format!(
5587 "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
5588 msg.next_local_commitment_number,
5589 next_counterparty_commitment_number,
5592 Err(ChannelError::Close(format!(
5593 "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
5594 msg.next_local_commitment_number,
5595 next_counterparty_commitment_number,
5600 /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
5601 /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
5602 /// at which point they will be recalculated.
5603 fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
5605 where F::Target: FeeEstimator
5607 if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
5609 // Propose a range from our current Background feerate to our Normal feerate plus our
5610 // force_close_avoidance_max_fee_satoshis.
5611 // If we fail to come to consensus, we'll have to force-close.
5612 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
5613 // Use NonAnchorChannelFee because this should be an estimate for a channel close
5614 // that we don't expect to need fee bumping
5615 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
5616 let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
5618 // The spec requires that (when the channel does not have anchors) we only send absolute
5619 // channel fees no greater than the absolute channel fee on the current commitment
5620 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
5621 // a very good reason to apply such a limit in any case. We don't bother doing so, risking
5622 // some force-closure by old nodes, but we wanted to close the channel anyway.
5624 if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
5625 let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
5626 proposed_feerate = cmp::max(proposed_feerate, min_feerate);
5627 proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
5630 // Note that technically we could end up with a lower minimum fee if one side's balance is
5631 // below our dust limit, causing the output to disappear. We don't bother handling this
5632 // case, however, as this should only happen if a channel is closed before any (material)
5633 // payments have been made on it. This may cause slight fee overpayment and/or failure to
5634 // come to consensus with our counterparty on appropriate fees, however it should be a
5635 // relatively rare case. We can revisit this later, though note that in order to determine
5636 // if the funder's output is dust we have to know the absolute fee we're going to use.
5637 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
5638 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
5639 let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
5640 // We always add force_close_avoidance_max_fee_satoshis to our normal
5641 // feerate-calculated fee, but allow the max to be overridden if we're using a
5642 // target feerate-calculated fee.
5643 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
5644 proposed_max_feerate as u64 * tx_weight / 1000)
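// If we're not the funder, the counterparty pays the closing fee, so cap it at their
// entire balance: the channel value less our balance, rounded up to a whole satoshi.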
5646 self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
5649 self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
5650 self.context.closing_fee_limits.clone().unwrap()
5653 /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
5654 /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
5655 /// this point if we're the funder we should send the initial closing_signed, and in any case
5656 /// shutdown should complete within a reasonable timeframe.
5657 fn closing_negotiation_ready(&self) -> bool {
5658 self.context.closing_negotiation_ready()
5661 /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
5662 /// an Err if no progress is being made and the channel should be force-closed instead.
5663 /// Should be called on a one-minute timer.
5664 pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
5665 if self.closing_negotiation_ready() {
5666 if self.context.closing_signed_in_flight {
5667 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
5669 self.context.closing_signed_in_flight = true;
5675 pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
5676 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
5677 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
5678 where F::Target: FeeEstimator, L::Target: Logger
5680 // If we're waiting on a monitor persistence, that implies we're also waiting to send some
5681 // message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
5682 // initiate `closing_signed` negotiation until we're clear of all pending messages. Note
5683 // that closing_negotiation_ready checks this case (as well as a few others).
5684 if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
5685 return Ok((None, None, None));
5688 if !self.context.is_outbound() {
5689 if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
5690 return self.closing_signed(fee_estimator, &msg);
5692 return Ok((None, None, None));
5695 // If we're waiting on a counterparty `commitment_signed` to clear some updates from our
5696 // local commitment transaction, we can't yet initiate `closing_signed` negotiation.
5697 if self.context.expecting_peer_commitment_signed {
5698 return Ok((None, None, None));
5701 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
5703 assert!(self.context.shutdown_scriptpubkey.is_some());
5704 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
5705 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
5706 our_min_fee, our_max_fee, total_fee_satoshis);
5708 match &self.context.holder_signer {
5709 ChannelSignerType::Ecdsa(ecdsa) => {
5711 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
5712 .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
5714 self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
5715 Ok((Some(msgs::ClosingSigned {
5716 channel_id: self.context.channel_id,
5717 fee_satoshis: total_fee_satoshis,
5719 fee_range: Some(msgs::ClosingSignedFeeRange {
5720 min_fee_satoshis: our_min_fee,
5721 max_fee_satoshis: our_max_fee,
5725 // TODO (taproot|arik)
5731 // Marks a channel as waiting for a response from the counterparty. If it's not received
5732 // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
5733 // a reconnection.
5734 fn mark_awaiting_response(&mut self) {
5735 self.context.sent_message_awaiting_response = Some(0);
5738 /// Determines whether we should disconnect the counterparty due to not receiving a response
5739 /// within our expected timeframe.
5741 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
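///
/// A hedged usage sketch (the timer plumbing, `disconnect_peer` helper and
/// `counterparty_node_id` binding are assumed, not part of this file):
///
/// ```ignore
/// // Once per timer tick, disconnect the peer if this channel has waited more than
/// // DISCONNECT_PEER_AWAITING_RESPONSE_TICKS ticks for a response.
/// if channel.should_disconnect_peer_awaiting_response() {
///     disconnect_peer(counterparty_node_id);
/// }
/// ```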
5742 pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
5743 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
5746 // Don't disconnect when we're not waiting on a response.
5749 *ticks_elapsed += 1;
5750 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
5753 pub fn shutdown(
5754 &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
5755 ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
5757 if self.context.channel_state.is_peer_disconnected() {
5758 return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
5760 if self.context.channel_state.is_pre_funded_state() {
5761 // Spec says we should fail the connection, not the channel, but that's nonsense, there
5762 // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
5763 // can do that via error message without getting a connection fail anyway...
5764 return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
5766 for htlc in self.context.pending_inbound_htlcs.iter() {
5767 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
5768 return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
5771 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
5773 if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
5774 return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
5777 if self.context.counterparty_shutdown_scriptpubkey.is_some() {
5778 if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
5779 return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
5782 self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
5785 // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
5786 // immediately after the commitment dance, but we can send a Shutdown because we won't send
5787 // any further commitment updates after we set LocalShutdownSent.
5788 let send_shutdown = !self.context.channel_state.is_local_shutdown_sent();
5790 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
5793 assert!(send_shutdown);
5794 let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
5795 Ok(scriptpubkey) => scriptpubkey,
5796 Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
5798 if !shutdown_scriptpubkey.is_compatible(their_features) {
5799 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
5801 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
5806 // From here on out, we may not fail!
5808 self.context.channel_state.set_remote_shutdown_sent();
5809 self.context.update_time_counter += 1;
5811 let monitor_update = if update_shutdown_script {
5812 self.context.latest_monitor_update_id += 1;
5813 let monitor_update = ChannelMonitorUpdate {
5814 update_id: self.context.latest_monitor_update_id,
5815 counterparty_node_id: Some(self.context.counterparty_node_id),
5816 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
5817 scriptpubkey: self.get_closing_scriptpubkey(),
5819 channel_id: Some(self.context.channel_id()),
5821 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
5822 self.push_ret_blockable_mon_update(monitor_update)
5824 let shutdown = if send_shutdown {
5825 Some(msgs::Shutdown {
5826 channel_id: self.context.channel_id,
5827 scriptpubkey: self.get_closing_scriptpubkey(),
5831 // We can't send our shutdown until we've committed all of our pending HTLCs, but the
5832 // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
5833 // cell HTLCs and return them to fail the payment.
5834 self.context.holding_cell_update_fee = None;
5835 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
5836 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5838 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
5839 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
5846 self.context.channel_state.set_local_shutdown_sent();
5847 self.context.update_time_counter += 1;
5849 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
5852 fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
5853 let mut tx = closing_tx.trust().built_transaction().clone();
5855 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
5857 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
5858 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
5859 let mut holder_sig = sig.serialize_der().to_vec();
5860 holder_sig.push(EcdsaSighashType::All as u8);
5861 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
5862 cp_sig.push(EcdsaSighashType::All as u8);
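// BOLT 3 orders the two signatures in the witness to match the pubkey order in the
// funding redeemscript, which lists the lexicographically lesser serialized key first.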
5863 if funding_key[..] < counterparty_funding_key[..] {
5864 tx.input[0].witness.push(holder_sig);
5865 tx.input[0].witness.push(cp_sig);
5867 tx.input[0].witness.push(cp_sig);
5868 tx.input[0].witness.push(holder_sig);
5871 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
5875 pub fn closing_signed<F: Deref>(
5876 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
5877 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
5878 where F::Target: FeeEstimator
5880 if !self.context.channel_state.is_both_sides_shutdown() {
5881 return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
5883 if self.context.channel_state.is_peer_disconnected() {
5884 return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
5886 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
5887 return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
5889 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
5890 return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
5893 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
5894 return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
5897 if self.context.channel_state.is_monitor_update_in_progress() {
5898 self.context.pending_counterparty_closing_signed = Some(msg.clone());
5899 return Ok((None, None, None));
5902 let funding_redeemscript = self.context.get_funding_redeemscript();
5903 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
5904 if used_total_fee != msg.fee_satoshis {
5905 return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
5907 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
5909 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
5910 Ok(_) => {},
5911 Err(_e) => {
5912 // The remote end may have decided to revoke their output due to inconsistent dust
5913 // limits, so check for that case by re-checking the signature here.
5914 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
5915 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
5916 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
5920 for outp in closing_tx.trust().built_transaction().output.iter() {
5921 if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
5922 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
5926 let closure_reason = if self.initiated_shutdown() {
5927 ClosureReason::LocallyInitiatedCooperativeClosure
5929 ClosureReason::CounterpartyInitiatedCooperativeClosure
5932 assert!(self.context.shutdown_scriptpubkey.is_some());
5933 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
5934 if last_fee == msg.fee_satoshis {
5935 let shutdown_result = ShutdownResult {
5936 closure_reason,
5937 monitor_update: None,
5938 dropped_outbound_htlcs: Vec::new(),
5939 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
5940 channel_id: self.context.channel_id,
5941 user_channel_id: self.context.user_id,
5942 channel_capacity_satoshis: self.context.channel_value_satoshis,
5943 counterparty_node_id: self.context.counterparty_node_id,
5944 unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
5945 channel_funding_txo: self.context.get_funding_txo(),
5947 let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
5948 self.context.channel_state = ChannelState::ShutdownComplete;
5949 self.context.update_time_counter += 1;
5950 return Ok((None, Some(tx), Some(shutdown_result)));
5954 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
5956 macro_rules! propose_fee {
5957 ($new_fee: expr) => {
5958 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
5959 (closing_tx, $new_fee)
5961 self.build_closing_transaction($new_fee, false)
5964 return match &self.context.holder_signer {
5965 ChannelSignerType::Ecdsa(ecdsa) => {
5967 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
5968 .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
5969 let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
5970 let shutdown_result = ShutdownResult {
5971 closure_reason,
5972 monitor_update: None,
5973 dropped_outbound_htlcs: Vec::new(),
5974 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
5975 channel_id: self.context.channel_id,
5976 user_channel_id: self.context.user_id,
5977 channel_capacity_satoshis: self.context.channel_value_satoshis,
5978 counterparty_node_id: self.context.counterparty_node_id,
5979 unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
5980 channel_funding_txo: self.context.get_funding_txo(),
5982 self.context.channel_state = ChannelState::ShutdownComplete;
5983 self.context.update_time_counter += 1;
5984 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
5985 (Some(tx), Some(shutdown_result))
5990 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
5991 Ok((Some(msgs::ClosingSigned {
5992 channel_id: self.context.channel_id,
5993 fee_satoshis: used_fee,
5995 fee_range: Some(msgs::ClosingSignedFeeRange {
5996 min_fee_satoshis: our_min_fee,
5997 max_fee_satoshis: our_max_fee,
5999 }), signed_tx, shutdown_result))
6001 // TODO (taproot|arik)
6008 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
6009 if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
6010 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
6012 if max_fee_satoshis < our_min_fee {
6013 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
6015 if min_fee_satoshis > our_max_fee {
6016 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
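// From here on, their proposed fee is inside their own range and their range overlaps
// ours; which fee we settle on depends on who pays (the funder), handled below.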
6019 if !self.context.is_outbound() {
6020 // They have to pay, so pick the highest fee in the overlapping range.
6021 // We should never set an upper bound aside from their full balance
6022 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
6023 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
6025 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
6026 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
6027 msg.fee_satoshis, our_min_fee, our_max_fee)));
6029 // The proposed fee is in our acceptable range, accept it and broadcast!
6030 propose_fee!(msg.fee_satoshis);
6033 // Old fee style negotiation. We don't bother to enforce whether they are complying
6034 // with the "making progress" requirements, we just comply and hope for the best.
6035 if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
6036 if msg.fee_satoshis > last_fee {
6037 if msg.fee_satoshis < our_max_fee {
6038 propose_fee!(msg.fee_satoshis);
6039 } else if last_fee < our_max_fee {
6040 propose_fee!(our_max_fee);
6042 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
6045 if msg.fee_satoshis > our_min_fee {
6046 propose_fee!(msg.fee_satoshis);
6047 } else if last_fee > our_min_fee {
6048 propose_fee!(our_min_fee);
6050 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
6054 if msg.fee_satoshis < our_min_fee {
6055 propose_fee!(our_min_fee);
6056 } else if msg.fee_satoshis > our_max_fee {
6057 propose_fee!(our_max_fee);
6059 propose_fee!(msg.fee_satoshis);
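	// Illustrative sketch of the legacy (no fee_range) negotiation above, using made-up
	// numbers rather than values from any real channel: if our acceptable range is
	// [our_min_fee = 200 sat, our_max_fee = 1_000 sat], we last proposed 300 sat, and the
	// peer counters with 800 sat, then `msg.fee_satoshis > last_fee` and
	// `msg.fee_satoshis < our_max_fee`, so we simply accept 800 sat via
	// `propose_fee!(msg.fee_satoshis)`. We only error out when the counter-proposal falls
	// outside [our_min_fee, our_max_fee] after we've already proposed our own boundary.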
	fn internal_htlc_satisfies_config(
		&self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
	) -> Result<(), (&'static str, u16)> {
		let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
			.and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
		if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
			(htlc.amount_msat - fee.unwrap()) < amt_to_forward {
			return Err((
				"Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
				0x1000 | 12, // fee_insufficient
			));
		}
		if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
			return Err((
				"Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
				0x1000 | 13, // incorrect_cltv_expiry
			));
		}
		Ok(())
	}
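	// Worked example for the forwarding-fee check above, with hypothetical config values:
	// forwarding_fee_base_msat = 1_000, forwarding_fee_proportional_millionths = 100 and
	// amt_to_forward = 1_000_000 msat gives
	//   fee = 1_000_000 * 100 / 1_000_000 + 1_000 = 1_100 msat,
	// so the incoming `htlc.amount_msat` must be at least 1_001_100 msat or we fail the
	// HTLC back with 0x1000 | 12 (fee_insufficient).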
	/// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
	/// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
	/// unsuccessful, falls back to the previous one if one exists.
	pub fn htlc_satisfies_config(
		&self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
	) -> Result<(), (&'static str, u16)> {
		self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
			.or_else(|err| {
				if let Some(prev_config) = self.context.prev_config() {
					self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
				} else {
					Err(err)
				}
			})
	}
	pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
		self.context.cur_holder_commitment_transaction_number + 1
	}

	pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
		self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 }
	}

	pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
		self.context.cur_counterparty_commitment_transaction_number + 2
	}
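	// A rough illustration of the off-by-one adjustments above (not additional logic): the
	// `cur_*_commitment_transaction_number` fields count down from INITIAL_COMMITMENT_NUMBER
	// and hold the index of the commitment we would build *next*. Adding one thus reports the
	// most recently exchanged commitment, while adding two reports the most recent commitment
	// the counterparty has already revoked, since they revoke the previous transaction each
	// time a new one is signed.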
	pub fn get_signer(&self) -> &ChannelSignerType<SP> {
		&self.context.holder_signer
	}
	pub fn get_value_stat(&self) -> ChannelValueStat {
		ChannelValueStat {
			value_to_self_msat: self.context.value_to_self_msat,
			channel_value_msat: self.context.channel_value_satoshis * 1000,
			channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
			pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
			pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
			holding_cell_outbound_amount_msat: {
				let mut res = 0;
				for h in self.context.holding_cell_htlc_updates.iter() {
					match h {
						&HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
							res += amount_msat;
						},
						_ => {}
					}
				}
				res
			},
			counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
			counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
		}
	}
	/// Returns true if this channel has been marked as awaiting a monitor update to move forward.
	/// Allowed in any state (including after shutdown)
	pub fn is_awaiting_monitor_update(&self) -> bool {
		self.context.channel_state.is_monitor_update_in_progress()
	}
	/// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
	pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
		if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
		self.context.blocked_monitor_updates[0].update.update_id - 1
	}
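	// Example of the arithmetic above (hypothetical values): if updates with update_id 7 and 8
	// are sitting in `blocked_monitor_updates`, every id up to and including 6 has already been
	// released to the user, so the latest unblocked in-flight update id is
	// blocked_monitor_updates[0].update.update_id - 1 = 6.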
	/// Returns the next blocked monitor update, if one exists, and a bool which indicates a
	/// further blocked monitor update exists after the next.
	pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
		if self.context.blocked_monitor_updates.is_empty() { return None; }
		Some((self.context.blocked_monitor_updates.remove(0).update,
			!self.context.blocked_monitor_updates.is_empty()))
	}
	/// Pushes a new monitor update into our monitor update queue, returning it if it should be
	/// immediately given to the user for persisting or `None` if it should be held as blocked.
	fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
	-> Option<ChannelMonitorUpdate> {
		let release_monitor = self.context.blocked_monitor_updates.is_empty();
		if !release_monitor {
			self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
				update,
			});
			None
		} else {
			Some(update)
		}
	}

	pub fn blocked_monitor_updates_pending(&self) -> usize {
		self.context.blocked_monitor_updates.len()
	}
	/// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
	/// If the channel is outbound, this implies we have not yet broadcasted the funding
	/// transaction. If the channel is inbound, this implies simply that the channel has not
	/// advanced state.
	pub fn is_awaiting_initial_mon_persist(&self) -> bool {
		if !self.is_awaiting_monitor_update() { return false; }
		if matches!(
			self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
			if flags.clone().clear(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty()
		) {
			// If we're not a 0conf channel, we'll be waiting on a monitor update with only
			// AwaitingChannelReady set, though our peer could have sent their channel_ready.
			debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
			return true;
		}
		if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
			self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
			// If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
			// waiting for the initial monitor persistence. Thus, we check if our commitment
			// transaction numbers have both been iterated only exactly once (for the
			// funding_signed), and we're awaiting monitor update.
			//
			// If we got here, we shouldn't have yet broadcasted the funding transaction (as the
			// only way to get an awaiting-monitor-update state during initial funding is if the
			// initial monitor persistence is still pending).
			//
			// Because deciding we're awaiting initial broadcast spuriously could result in
			// funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
			// we hard-assert here, even in production builds.
			if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
			assert!(self.context.monitor_pending_channel_ready);
			assert_eq!(self.context.latest_monitor_update_id, 0);
			return true;
		}
		false
	}
	/// Returns true if our channel_ready has been sent
	pub fn is_our_channel_ready(&self) -> bool {
		matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) ||
			matches!(self.context.channel_state, ChannelState::ChannelReady(_))
	}

	/// Returns true if our peer has either initiated or agreed to shut down the channel.
	pub fn received_shutdown(&self) -> bool {
		self.context.channel_state.is_remote_shutdown_sent()
	}

	/// Returns true if we either initiated or agreed to shut down the channel.
	pub fn sent_shutdown(&self) -> bool {
		self.context.channel_state.is_local_shutdown_sent()
	}

	/// Returns true if we initiated shutting down the channel.
	pub fn initiated_shutdown(&self) -> bool {
		self.context.local_initiated_shutdown.is_some()
	}

	/// Returns true if this channel is fully shut down. True here implies that no further actions
	/// may/will be taken on this channel, and thus this object should be freed. Any future changes
	/// will be handled appropriately by the chain monitor.
	pub fn is_shutdown(&self) -> bool {
		matches!(self.context.channel_state, ChannelState::ShutdownComplete)
	}

	pub fn channel_update_status(&self) -> ChannelUpdateStatus {
		self.context.channel_update_status
	}

	pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
		self.context.update_time_counter += 1;
		self.context.channel_update_status = status;
	}
	fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
		// Called:
		//  * always when a new block/transactions are confirmed with the new height
		//  * when funding is signed with a height of 0
		if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
			return None;
		}

		let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
		if funding_tx_confirmations <= 0 {
			self.context.funding_tx_confirmation_height = 0;
		}

		if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
			return None;
		}
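		// A quick sanity example for the +1 above (illustrative numbers only): a funding
		// transaction confirmed at height 100 has 1 confirmation when the chain tip is at
		// height 100, so at tip height 102 it has 102 - 100 + 1 = 3 confirmations, while a
		// non-positive result means the confirming block itself was reorged out.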
		// If we're still pending the signature on a funding transaction, then we're not ready to send a
		// channel_ready yet.
		if self.context.signer_pending_funding {
			return None;
		}

		// Note that we don't include ChannelState::WaitingForBatch as we don't want to send
		// channel_ready until the entire batch is ready.
		let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()).is_empty()) {
			self.context.channel_state.set_our_channel_ready();
			true
		} else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
			self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
			self.context.update_time_counter += 1;
			true
		} else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
			// We got a reorg but not enough to trigger a force close, just ignore.
			false
		} else {
			if self.context.funding_tx_confirmation_height != 0 &&
				self.context.channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new())
			{
				// We should never see a funding transaction on-chain until we've received
				// funding_signed (if we're an outbound channel), or seen funding_generated (if we're
				// an inbound channel - before that we have no known funding TXID). The fuzzer,
				// however, may do this and we shouldn't treat it as a bug.
				#[cfg(not(fuzzing))]
				panic!("Started confirming a channel in a state pre-AwaitingChannelReady: {}.\n\
					Do NOT broadcast a funding transaction manually - let LDK do it for you!",
					self.context.channel_state.to_u32());
			}
			// We got a reorg but not enough to trigger a force close, just ignore.
			false
		};

		if need_commitment_update {
			if !self.context.channel_state.is_monitor_update_in_progress() {
				if !self.context.channel_state.is_peer_disconnected() {
					let next_per_commitment_point =
						self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
					return Some(msgs::ChannelReady {
						channel_id: self.context.channel_id,
						next_per_commitment_point,
						short_channel_id_alias: Some(self.context.outbound_scid_alias),
					});
				}
			} else {
				self.context.monitor_pending_channel_ready = true;
			}
		}
		None
	}
	/// When a transaction is confirmed, we check whether it is or spends the funding transaction.
	/// In the first case, we store the confirmation height and calculate the short channel id.
	/// In the second, we simply return an Err indicating we need to be force-closed now.
	pub fn transactions_confirmed<NS: Deref, L: Deref>(
		&mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
		chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
	) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		let mut msgs = (None, None);
		if let Some(funding_txo) = self.context.get_funding_txo() {
			for &(index_in_block, tx) in txdata.iter() {
				// Check if the transaction is the expected funding transaction, and if it is,
				// check that it pays the right amount to the right script.
				if self.context.funding_tx_confirmation_height == 0 {
					if tx.txid() == funding_txo.txid {
						let txo_idx = funding_txo.index as usize;
						if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
								tx.output[txo_idx].value != self.context.channel_value_satoshis {
							if self.context.is_outbound() {
								// If we generated the funding transaction and it doesn't match what it
								// should, the client is really broken and we should just panic and
								// tell them off. That said, because hash collisions happen with high
								// probability in fuzzing mode, if we're fuzzing we just close the
								// channel and move on.
								#[cfg(not(fuzzing))]
								panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
							}
							self.context.update_time_counter += 1;
							let err_reason = "funding tx had wrong script/value or output index";
							return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
						} else {
							if self.context.is_outbound() {
								if !tx.is_coin_base() {
									for input in tx.input.iter() {
										if input.witness.is_empty() {
											// We generated a malleable funding transaction, implying we've
											// just exposed ourselves to funds loss to our counterparty.
											#[cfg(not(fuzzing))]
											panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
										}
									}
								}
							}
							self.context.funding_tx_confirmation_height = height;
							self.context.funding_tx_confirmed_in = Some(*block_hash);
							self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
								Ok(scid) => Some(scid),
								Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
							};
						}
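						// For intuition (not additional logic): a short channel id packs the
						// confirmation block height, the transaction's index within that block,
						// and the funding output index into a single u64, roughly
						//   scid = (height << 40) | (index_in_block << 16) | txo_idx,
						// which is why `scid_from_parts` rejects heights or transaction indices
						// of 2^24 or more and output indices of 2^16 or more, matching the
						// panic message above.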
						// If this is a coinbase transaction and not a 0-conf channel
						// we should update our min_depth to 100 to handle coinbase maturity
						if tx.is_coin_base() &&
							self.context.minimum_depth.unwrap_or(0) > 0 &&
							self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
							self.context.minimum_depth = Some(COINBASE_MATURITY);
						}
					}
					// If we allow 1-conf funding, we may need to check for channel_ready here and
					// send it immediately instead of waiting for a best_block_updated call (which
					// may have already happened for this block).
					if let Some(channel_ready) = self.check_get_channel_ready(height) {
						log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
						let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
						msgs = (Some(channel_ready), announcement_sigs);
					}
				}
				for inp in tx.input.iter() {
					if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
						log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
						return Err(ClosureReason::CommitmentTxConfirmed);
					}
				}
			}
		}
		Ok(msgs)
	}
	/// When a new block is connected, we check the height of the block against outbound holding
	/// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
	/// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
	/// handled by the ChannelMonitor.
	///
	/// If we return Err, the channel may have been closed, at which point the standard
	/// requirements apply - no calls may be made except those explicitly stated to be allowed
	/// post-shutdown.
	///
	/// May return some HTLCs (and their payment_hash) which have timed out and should be failed
	/// back.
	pub fn best_block_updated<NS: Deref, L: Deref>(
		&mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
		node_signer: &NS, user_config: &UserConfig, logger: &L
	) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
	}

	fn do_best_block_updated<NS: Deref, L: Deref>(
		&mut self, height: u32, highest_header_time: u32,
		chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
	) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		let mut timed_out_htlcs = Vec::new();
		// This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
		// forward an HTLC when our counterparty should almost certainly just fail it for expiring
		// ~now.
		let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
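		// Illustrative example with made-up numbers: with a LATENCY_GRACE_PERIOD_BLOCKS of,
		// say, 3 and a new best block at height 800_000, a holding-cell HTLC with
		// cltv_expiry = 800_002 falls within the limit computed above, so we give up on
		// forwarding it and fail it back ourselves rather than have the counterparty reject
		// it for expiring too soon.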
		self.context.holding_cell_htlc_updates.retain(|htlc_update| {
			match htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
					if *cltv_expiry <= unforwarded_htlc_cltv_limit {
						timed_out_htlcs.push((source.clone(), payment_hash.clone()));
						false
					} else { true }
				},
				_ => true
			}
		});

		self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);

		if let Some(channel_ready) = self.check_get_channel_ready(height) {
			let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
				self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
			} else { None };
			log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
			return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
		}

		if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
			self.context.channel_state.is_our_channel_ready() {
			let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
			if self.context.funding_tx_confirmation_height == 0 {
				// Note that check_get_channel_ready may reset funding_tx_confirmation_height to
				// zero if it has been reorged out, however in either case, our state flags
				// indicate we've already sent a channel_ready
				funding_tx_confirmations = 0;
			}

			// If we've sent channel_ready (or have both sent and received channel_ready), and
			// the funding transaction has become unconfirmed,
			// close the channel and hope we can get the latest state on chain (because presumably
			// the funding transaction is at least still in the mempool of most nodes).
			//
			// Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
			// 0-conf channel, but not doing so may lead to the
			// `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
			// to.
			if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
				let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
					self.context.minimum_depth.unwrap(), funding_tx_confirmations);
				return Err(ClosureReason::ProcessingError { err: err_reason });
			}
		} else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
				height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
			log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
			// If funding_tx_confirmed_in is unset, the channel must not be active
			assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new()));
			assert!(!self.context.channel_state.is_our_channel_ready());
			return Err(ClosureReason::FundingTimedOut);
		}

		let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
			self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
		} else { None };
		Ok((None, timed_out_htlcs, announcement_sigs))
	}
	/// Indicates the funding transaction is no longer confirmed in the main chain. This may
	/// force-close the channel, but may also indicate a harmless reorganization of a block or two
	/// before the channel has reached channel_ready and we can just wait for more blocks.
	pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
		if self.context.funding_tx_confirmation_height != 0 {
			// We handle the funding disconnection by calling best_block_updated with a height one
			// below where our funding was connected, implying a reorg back to conf_height - 1.
			let reorg_height = self.context.funding_tx_confirmation_height - 1;
			// We use the time field to bump the current time we set on channel updates if it's
			// larger. If we don't know that time has moved forward, we can just set it to the last
			// time we saw and it will be ignored.
			let best_time = self.context.update_time_counter;
			match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) {
				Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
					assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
					assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
					assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
					Ok(())
				},
				Err(e) => Err(e)
			}
		} else {
			// We never learned about the funding confirmation anyway, just ignore
			Ok(())
		}
	}
	// Methods to get unprompted messages to send to the remote end (or where we already returned
	// something in the handler for the message that prompted this message):

	/// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
	/// announceable and available for use (have exchanged [`ChannelReady`] messages in both
	/// directions). Should be used for both broadcasted announcements and in response to an
	/// AnnouncementSignatures message from the remote peer.
	///
	/// Will only fail if we're not in a state where channel_announcement may be sent (including
	/// closing).
	///
	/// This will only return ChannelError::Ignore upon failure.
	///
	/// [`ChannelReady`]: crate::ln::msgs::ChannelReady
	fn get_channel_announcement<NS: Deref>(
		&self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
	) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
		if !self.context.config.announced_channel {
			return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
		}
		if !self.context.is_usable() {
			return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
		}

		let short_channel_id = self.context.get_short_channel_id()
			.ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
		let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
			.map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
		let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
		let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();

		let msg = msgs::UnsignedChannelAnnouncement {
			features: channelmanager::provided_channel_features(&user_config),
			chain_hash,
			short_channel_id,
			node_id_1: if were_node_one { node_id } else { counterparty_node_id },
			node_id_2: if were_node_one { counterparty_node_id } else { node_id },
			bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
			bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
			excess_data: Vec::new(),
		};

		Ok(msg)
	}
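	// The `were_node_one` ordering above follows BOLT 7: node_id_1 (and bitcoin_key_1) must
	// belong to the node whose serialized public key is lexicographically lesser, so both
	// peers independently construct byte-identical unsigned announcements. For example
	// (hypothetical keys), a node id beginning 0x02... sorts before one beginning 0x03...
	// and therefore becomes node_id_1.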
	fn get_announcement_sigs<NS: Deref, L: Deref>(
		&mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
		best_block_height: u32, logger: &L
	) -> Option<msgs::AnnouncementSignatures>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
			return None;
		}

		if !self.context.is_usable() {
			return None;
		}

		if self.context.channel_state.is_peer_disconnected() {
			log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
			return None;
		}

		if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
			return None;
		}

		log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
		let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
			Ok(a) => a,
			Err(e) => {
				log_trace!(logger, "{:?}", e);
				return None;
			}
		};
		let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
			Err(_) => {
				log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
				return None;
			},
			Ok(v) => v
		};
		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
					Err(_) => {
						log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
						return None;
					},
					Ok(v) => v
				};
				let short_channel_id = match self.context.get_short_channel_id() {
					Some(scid) => scid,
					None => return None,
				};

				self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;

				Some(msgs::AnnouncementSignatures {
					channel_id: self.context.channel_id(),
					short_channel_id,
					node_signature: our_node_sig,
					bitcoin_signature: our_bitcoin_sig,
				})
			},
			// TODO (taproot|arik)
			#[cfg(taproot)]
			_ => todo!()
		}
	}
	/// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
	/// available.
	fn sign_channel_announcement<NS: Deref>(
		&self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
	) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
		if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
			let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
				.map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
			let were_node_one = announcement.node_id_1 == our_node_key;

			let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
				.map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
			match &self.context.holder_signer {
				ChannelSignerType::Ecdsa(ecdsa) => {
					let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
						.map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
					Ok(msgs::ChannelAnnouncement {
						node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
						node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
						bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
						bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
						contents: announcement,
					})
				},
				// TODO (taproot|arik)
				#[cfg(taproot)]
				_ => todo!()
			}
		} else {
			Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
		}
	}
	/// Processes an incoming announcement_signatures message, providing a fully-signed
	/// channel_announcement message which we can broadcast and storing our counterparty's
	/// signatures for later reconstruction/rebroadcast of the channel_announcement.
	pub fn announcement_signatures<NS: Deref>(
		&mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
		msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
	) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
		let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;

		let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);

		if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
			return Err(ChannelError::Close(format!(
				"Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
				&announcement, self.context.get_counterparty_node_id())));
		}
		if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
			return Err(ChannelError::Close(format!(
				"Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
				&announcement, self.context.counterparty_funding_pubkey())));
		}

		self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
		if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
			return Err(ChannelError::Ignore(
				"Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
		}

		self.sign_channel_announcement(node_signer, announcement)
	}
	/// Gets a signed channel_announcement for this channel, if we previously received an
	/// announcement_signatures from our counterparty.
	pub fn get_signed_channel_announcement<NS: Deref>(
		&self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
	) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
		if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
			return None;
		}
		let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
			Ok(res) => res,
			Err(_) => return None,
		};
		match self.sign_channel_announcement(node_signer, announcement) {
			Ok(res) => Some(res),
			Err(_) => None,
		}
	}
	/// May panic if called on a channel that wasn't immediately-previously
	/// self.remove_uncommitted_htlcs_and_mark_paused()'d
	pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
		assert!(self.context.channel_state.is_peer_disconnected());
		assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
		// Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
		// current to_remote balances. However, it no longer has any use, and thus is now simply
		// set to a dummy (but valid, as required by the spec) public key.
		// fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
		// branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
		// valid, and valid in fuzzing mode's arbitrary validity criteria:
		let mut pk = [2; 33]; pk[1] = 0xff;
		let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
		let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
			let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
			log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
			remote_last_secret
		} else {
			log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
			[0; 32]
		};
		self.mark_awaiting_response();
		msgs::ChannelReestablish {
			channel_id: self.context.channel_id(),
			// The protocol has two different commitment number concepts - the "commitment
			// transaction number", which starts from 0 and counts up, and the "revocation key
			// index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
			// commitment transaction numbers by the index which will be used to reveal the
			// revocation key for that commitment transaction, which means we have to convert them
			// to protocol-level commitment numbers here...

			// next_local_commitment_number is the next commitment_signed number we expect to
			// receive (indicating if they need to resend one that we missed).
			next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
			// We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
			// receive, however we track it by the next commitment number for a remote transaction
			// (which is one further, as they always revoke previous commitment transaction, not
			// the one we send) so we have to decrement by 1. Note that if
			// cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
			// dropped this channel on disconnect as it hasn't yet reached AwaitingChannelReady so we can't
			// overflow here.
			next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
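			// A concrete (illustrative) instance of the conversion above: with
			// INITIAL_COMMITMENT_NUMBER = 2^48 - 1, a channel which has exchanged exactly one
			// commitment update in each direction has both counters at
			// INITIAL_COMMITMENT_NUMBER - 1, yielding next_local_commitment_number = 1 and
			// next_remote_commitment_number = 0, matching the protocol-level numbers which
			// count up from zero.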
			your_last_per_commitment_secret: remote_last_secret,
			my_current_per_commitment_point: dummy_pubkey,
			// TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
			// construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
			// txid of that interactive transaction, else we MUST NOT set it.
			next_funding_txid: None,
		}
	}
	// Send stuff to our remote peers:

	/// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
	/// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
	/// commitment update.
	///
	/// `Err`s will only be [`ChannelError::Ignore`].
	pub fn queue_add_htlc<F: Deref, L: Deref>(
		&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
		blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> Result<(), ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		self
			.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
				skimmed_fee_msat, blinding_point, fee_estimator, logger)
			.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
			.map_err(|err| {
				if let ChannelError::Ignore(_) = err { /* fine */ }
				else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
				err
			})
	}
	/// Adds a pending outbound HTLC to this channel, note that you probably want
	/// [`Self::send_htlc_and_commit`] instead, as you'll want both messages at once.
	///
	/// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
	/// the wire:
	/// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
	///   wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
	///   in flight.
	/// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
	///   we may not yet have sent the previous commitment update messages and will need to
	///   regenerate them.
	///
	/// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
	/// on this [`Channel`] if `force_holding_cell` is false.
	///
	/// `Err`s will only be [`ChannelError::Ignore`].
	fn send_htlc<F: Deref, L: Deref>(
		&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
		skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
			self.context.channel_state.is_local_shutdown_sent() ||
			self.context.channel_state.is_remote_shutdown_sent()
		{
			return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
		}
		let channel_total_msat = self.context.channel_value_satoshis * 1000;
		if amount_msat > channel_total_msat {
			return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
		}

		if amount_msat == 0 {
			return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
		}

		let available_balances = self.context.get_available_balances(fee_estimator);
		if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
			return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
				available_balances.next_outbound_htlc_minimum_msat)));
		}

		if amount_msat > available_balances.next_outbound_htlc_limit_msat {
			return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
				available_balances.next_outbound_htlc_limit_msat)));
		}

		if self.context.channel_state.is_peer_disconnected() {
			// Note that this should never really happen: being !is_live() on receipt of an
			// incoming HTLC for relay will result in us rejecting the HTLC, and we won't allow
			// the user to send directly into a !is_live() channel. However, if we
			// disconnected during the time the previous hop was doing the commitment dance we may
			// end up getting here after the forwarding delay. In any case, returning an
			// IgnoreError will get ChannelManager to do the right thing and fail backwards now.
			return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
		}

		let need_holding_cell = !self.context.channel_state.can_generate_new_commitment();
		log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
			payment_hash, amount_msat,
			if force_holding_cell { "into holding cell" }
			else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
			else { "to peer" });

		if need_holding_cell {
			force_holding_cell = true;
		}

		// Now update local state:
		if force_holding_cell {
			self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
				amount_msat,
				payment_hash,
				cltv_expiry,
				source,
				onion_routing_packet,
				skimmed_fee_msat,
				blinding_point,
			});
			return Ok(None);
		}

		self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
			htlc_id: self.context.next_holder_htlc_id,
			amount_msat,
			payment_hash: payment_hash.clone(),
			cltv_expiry,
			state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
			source,
			blinding_point,
			skimmed_fee_msat,
		});

		let res = msgs::UpdateAddHTLC {
			channel_id: self.context.channel_id,
			htlc_id: self.context.next_holder_htlc_id,
			amount_msat,
			payment_hash,
			cltv_expiry,
			onion_routing_packet,
			skimmed_fee_msat,
			blinding_point,
		};
		self.context.next_holder_htlc_id += 1;

		Ok(Some(res))
	}
	fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
		log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
		// We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
		// fail to generate this, we still are at least at a position where upgrading their status
		// is acceptable.
		for htlc in self.context.pending_inbound_htlcs.iter_mut() {
			let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
				Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
			} else { None };
			if let Some(state) = new_state {
				log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
				htlc.state = state;
			}
		}
		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
				log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
				// Grab the preimage, if it exists, instead of cloning
				let mut reason = OutboundHTLCOutcome::Success(None);
				mem::swap(outcome, &mut reason);
				htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
			}
		}
		if let Some((feerate, update_state)) = self.context.pending_update_fee {
			if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
				debug_assert!(!self.context.is_outbound());
				log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
				self.context.feerate_per_kw = feerate;
				self.context.pending_update_fee = None;
			}
		}
		self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;

		let (mut htlcs_ref, counterparty_commitment_tx) =
			self.build_commitment_no_state_update(logger);
		let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
		let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
			htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();

		if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
			self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
		}

		self.context.latest_monitor_update_id += 1;
		let monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			counterparty_node_id: Some(self.context.counterparty_node_id),
			updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
				commitment_txid: counterparty_commitment_txid,
				htlc_outputs: htlcs.clone(),
				commitment_number: self.context.cur_counterparty_commitment_transaction_number,
				their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
				feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
				to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
				to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
			}],
			channel_id: Some(self.context.channel_id()),
		};
		self.context.channel_state.set_awaiting_remote_revoke();
		monitor_update
	}
	fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
	-> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
	where L::Target: Logger
	{
		let counterparty_keys = self.context.build_remote_transaction_keys();
		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
		let counterparty_commitment_tx = commitment_stats.tx;

		#[cfg(any(test, fuzzing))]
		{
			if !self.context.is_outbound() {
				let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
				*self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
				if let Some(info) = projected_commit_tx_info {
					let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
					if info.total_pending_htlcs == total_pending_htlcs
						&& info.next_holder_htlc_id == self.context.next_holder_htlc_id
						&& info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
						&& info.feerate == self.context.feerate_per_kw {
							let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
							assert_eq!(actual_fee, info.fee);
					}
				}
			}
		}

		(commitment_stats.htlcs_included, counterparty_commitment_tx)
	}
	/// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
	/// generation when we shouldn't change HTLC/channel state.
	fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
		// Get the fee tests from `build_commitment_no_state_update`
		#[cfg(any(test, fuzzing))]
		self.build_commitment_no_state_update(logger);

		let counterparty_keys = self.context.build_remote_transaction_keys();
		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
		let counterparty_commitment_txid = commitment_stats.tx.trust().txid();

		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				let (signature, htlc_signatures);

				{
					let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
					for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
						htlcs.push(htlc);
					}

					let res = ecdsa.sign_counterparty_commitment(
						&commitment_stats.tx,
						commitment_stats.inbound_htlc_preimages,
						commitment_stats.outbound_htlc_preimages,
						&self.context.secp_ctx,
					).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
					signature = res.0;
					htlc_signatures = res.1;

					log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
						encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
						&counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
						log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());

					for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
						log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
							encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
							encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
							log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
							log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
					}
				}

				Ok((msgs::CommitmentSigned {
					channel_id: self.context.channel_id,
					signature,
					htlc_signatures,
					#[cfg(taproot)]
					partial_signature_with_nonce: None,
				}, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
			},
			// TODO (taproot|arik)
			#[cfg(taproot)]
			_ => todo!()
		}
	}
	/// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
	/// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
	///
	/// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
	/// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
	pub fn send_htlc_and_commit<F: Deref, L: Deref>(
		&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
		source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
			onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
		if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
		match send_res? {
			Some(_) => {
				let monitor_update = self.build_commitment_no_status_check(logger);
				self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
				Ok(self.push_ret_blockable_mon_update(monitor_update))
			},
			None => Ok(None)
		}
	}
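	// Hedged usage sketch for the method above (the surrounding plumbing and variable names
	// are assumed, not taken from this file): a caller holding a funded `Channel` would do
	// something like
	//   let opt_update = chan.send_htlc_and_commit(
	//       amt_msat, payment_hash, cltv_expiry, source, onion, None, &fee_est, &logger)?;
	//   if let Some(update) = opt_update { /* hand the ChannelMonitorUpdate to persistence */ }
	// An `Ok(None)` here means the HTLC went into the holding cell, so no monitor update is
	// needed yet.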
	/// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
	/// happened.
	pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
		let new_forwarding_info = Some(CounterpartyForwardingInfo {
			fee_base_msat: msg.contents.fee_base_msat,
			fee_proportional_millionths: msg.contents.fee_proportional_millionths,
			cltv_expiry_delta: msg.contents.cltv_expiry_delta
		});
		let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
		if did_change {
			self.context.counterparty_forwarding_info = new_forwarding_info;
		}

		Ok(did_change)
	}
	/// Begins the shutdown process, getting a message for the remote peer and returning all
	/// holding cell HTLCs for payment failure.
	pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
		target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
	-> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
	{
		for htlc in self.context.pending_outbound_htlcs.iter() {
			if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
				return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
			}
		}
		if self.context.channel_state.is_local_shutdown_sent() {
			return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
		}
		else if self.context.channel_state.is_remote_shutdown_sent() {
			return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
		}
		if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
			return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
		}
		assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
		if self.context.channel_state.is_peer_disconnected() || self.context.channel_state.is_monitor_update_in_progress() {
			return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
		}

		let update_shutdown_script = match self.context.shutdown_scriptpubkey {
			Some(_) => false,
			None => {
				// use override shutdown script if provided
				let shutdown_scriptpubkey = match override_shutdown_script {
					Some(script) => script,
					None => {
						// otherwise, use the shutdown scriptpubkey provided by the signer
						match signer_provider.get_shutdown_scriptpubkey() {
							Ok(scriptpubkey) => scriptpubkey,
							Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
						}
					},
				};
				if !shutdown_scriptpubkey.is_compatible(their_features) {
					return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
				}
				self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
				true
			},
		};

		// From here on out, we may not fail!
		self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
		self.context.channel_state.set_local_shutdown_sent();
		self.context.local_initiated_shutdown = Some(());
		self.context.update_time_counter += 1;

		let monitor_update = if update_shutdown_script {
			self.context.latest_monitor_update_id += 1;
			let monitor_update = ChannelMonitorUpdate {
				update_id: self.context.latest_monitor_update_id,
				counterparty_node_id: Some(self.context.counterparty_node_id),
				updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
					scriptpubkey: self.get_closing_scriptpubkey(),
				}],
				channel_id: Some(self.context.channel_id()),
			};
			self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
			self.push_ret_blockable_mon_update(monitor_update)
		} else { None };
		let shutdown = msgs::Shutdown {
			channel_id: self.context.channel_id,
			scriptpubkey: self.get_closing_scriptpubkey(),
		};

		// Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
		// our shutdown until we've committed all of the pending changes.
		self.context.holding_cell_update_fee = None;
		let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
		self.context.holding_cell_htlc_updates.retain(|htlc_update| {
			match htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
					dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
					false
				},
				_ => true
			}
		});

		debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
			"we can't both complete shutdown and return a monitor update");

		Ok((shutdown, monitor_update, dropped_outbound_htlcs))
	}
	pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
		self.context.holding_cell_htlc_updates.iter()
			.flat_map(|htlc_update| {
				match htlc_update {
					HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
						=> Some((source, payment_hash)),
					_ => None,
				}
			})
			.chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
	}
}
/// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
	pub context: ChannelContext<SP>,
	pub unfunded_context: UnfundedChannelContext,
}

impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
	pub fn new<ES: Deref, F: Deref>(
		fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
		channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
		outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
	) -> Result<OutboundV1Channel<SP>, APIError>
	where ES::Target: EntropySource,
		F::Target: FeeEstimator
	{
		let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
		if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			// Protocol level safety check in place, although it should never happen because
			// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
			return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below \
				implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
		}

		let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
		let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
		let pubkeys = holder_signer.pubkeys().clone();

		let chan = Self {
			context: ChannelContext::new_for_outbound_channel(
				fee_estimator,
				entropy_source,
				signer_provider,
				counterparty_node_id,
				their_features,
				channel_value_satoshis,
				push_msat,
				user_id,
				config,
				current_chain_height,
				outbound_scid_alias,
				temporary_channel_id,
				holder_selected_channel_reserve_satoshis,
				channel_keys_id,
				holder_signer,
				pubkeys,
			)?,
			unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
		};
		Ok(chan)
	}
7260 /// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
7261 fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
7262 let counterparty_keys = self.context.build_remote_transaction_keys();
7263 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
7264 let signature = match &self.context.holder_signer {
7265 // TODO (taproot|arik): move match into calling method for Taproot
7266 ChannelSignerType::Ecdsa(ecdsa) => {
7267 ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
7268 .map(|(sig, _)| sig).ok()?
7270 // TODO (taproot|arik)
7275 if self.context.signer_pending_funding {
7276 log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
7277 self.context.signer_pending_funding = false;
7280 Some(msgs::FundingCreated {
7281 temporary_channel_id: self.context.temporary_channel_id.unwrap(),
7282 funding_txid: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
7283 funding_output_index: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
7286 partial_signature_with_nonce: None,
7288 next_local_nonce: None,
7292 /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
7293 /// a funding_created message for the remote peer.
7294 /// Panics if called at some time other than immediately after initial handshake, if called twice,
7295 /// or if called on an inbound channel.
7296 /// Note that channel_id changes during this call!
7297 /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
7298 /// If an Err is returned, it is a ChannelError::Close.
7299 pub fn get_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
7300 -> Result<Option<msgs::FundingCreated>, (Self, ChannelError)> where L::Target: Logger {
7301 if !self.context.is_outbound() {
7302 panic!("Tried to create outbound funding_created message on an inbound channel!");
7305 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7306 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7308 panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
7310 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7311 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7312 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7313 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7316 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
7317 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
7319 // Now that we're past error-generating stuff, update our local state:
7321 self.context.channel_state = ChannelState::FundingNegotiated;
7322 self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
7324 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
7325 // We can skip this if it is a zero-conf channel.
7326 if funding_transaction.is_coin_base() &&
7327 self.context.minimum_depth.unwrap_or(0) > 0 &&
7328 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
7329 self.context.minimum_depth = Some(COINBASE_MATURITY);
7332 self.context.funding_transaction = Some(funding_transaction);
7333 self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
7335 let funding_created = self.get_funding_created_msg(logger);
7336 if funding_created.is_none() {
7337 #[cfg(not(async_signing))] {
7338 panic!("Failed to get signature for new funding creation");
7340 #[cfg(async_signing)] {
7341 if !self.context.signer_pending_funding {
7342 log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
7343 self.context.signer_pending_funding = true;
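// The coinbase rule above as a standalone helper (hypothetical; the local constant mirrors
// this file's `COINBASE_MATURITY`): coinbase outputs are consensus-locked for 100 blocks,
// so any shallower non-zero minimum depth gets bumped, while zero-conf (depth 0) is exempt.
#[cfg(test)]
fn coinbase_min_depth_sketch() {
	fn adjusted_minimum_depth(minimum_depth: Option<u32>, is_coinbase: bool) -> Option<u32> {
		const COINBASE_MATURITY: u32 = 100;
		match (is_coinbase, minimum_depth) {
			// Bump shallow, non-zero-conf requirements up to coinbase maturity.
			(true, Some(d)) if d > 0 && d < COINBASE_MATURITY => Some(COINBASE_MATURITY),
			_ => minimum_depth,
		}
	}
	assert_eq!(adjusted_minimum_depth(Some(6), true), Some(100));
	assert_eq!(adjusted_minimum_depth(Some(0), true), Some(0)); // zero-conf skips the bump
	assert_eq!(adjusted_minimum_depth(Some(6), false), Some(6));
}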
7351 /// If we receive an error message, it may only be a rejection of the channel type we tried,
7352 /// not of our ability to open any channel at all. Thus, on error, we should first call this
7353 /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
7354 pub(crate) fn maybe_handle_error_without_close<F: Deref>(
7355 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
7356 ) -> Result<msgs::OpenChannel, ()>
7358 F::Target: FeeEstimator
7360 self.context.maybe_downgrade_channel_features(fee_estimator)?;
7361 Ok(self.get_open_channel(chain_hash))
7364 pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
7365 if !self.context.is_outbound() {
7366 panic!("Tried to open a channel for an inbound channel?");
7368 if self.context.have_received_message() {
7369 panic!("Cannot generate an open_channel after we've moved forward");
7372 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7373 panic!("Tried to send an open_channel for a channel that has already advanced");
7376 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
7377 let keys = self.context.get_holder_pubkeys();
7380 common_fields: msgs::CommonOpenChannelFields {
7382 temporary_channel_id: self.context.channel_id,
7383 funding_satoshis: self.context.channel_value_satoshis,
7384 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
7385 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
7386 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
7387 commitment_feerate_sat_per_1000_weight: self.context.feerate_per_kw as u32,
7388 to_self_delay: self.context.get_holder_selected_contest_delay(),
7389 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
7390 funding_pubkey: keys.funding_pubkey,
7391 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
7392 payment_basepoint: keys.payment_point,
7393 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
7394 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
7395 first_per_commitment_point,
7396 channel_flags: if self.context.config.announced_channel {1} else {0},
7397 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
7398 Some(script) => script.clone().into_inner(),
7399 None => Builder::new().into_script(),
7401 channel_type: Some(self.context.channel_type.clone()),
7403 push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
7404 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
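// BOLT 2 currently defines only bit 0 of `channel_flags` (`announce_channel`), which the
// message above sets from our config. A hedged decoding sketch (hypothetical helper, not a
// message API):
#[cfg(test)]
fn channel_flags_sketch() {
	fn announce_channel_requested(channel_flags: u8) -> bool {
		(channel_flags & 1) == 1
	}
	assert!(announce_channel_requested(1));
	assert!(!announce_channel_requested(0));
}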
7409 pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
7410 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
7412 // Check sanity of message fields:
7413 if !self.context.is_outbound() {
7414 return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
7416 if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
7417 return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
7419 if msg.common_fields.dust_limit_satoshis > 21000000 * 100000000 {
7420 return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.common_fields.dust_limit_satoshis)));
7422 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
7423 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
7425 if msg.common_fields.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
7426 return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.common_fields.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
7428 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
7429 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
7430 msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
7432 let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
7433 if msg.common_fields.htlc_minimum_msat >= full_channel_value_msat {
7434 return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.common_fields.htlc_minimum_msat, full_channel_value_msat)));
7436 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
7437 if msg.common_fields.to_self_delay > max_delay_acceptable {
7438 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.common_fields.to_self_delay)));
7440 if msg.common_fields.max_accepted_htlcs < 1 {
7441 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
7443 if msg.common_fields.max_accepted_htlcs > MAX_HTLCS {
7444 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.common_fields.max_accepted_htlcs, MAX_HTLCS)));
7447 // Now check against optional parameters as set by config...
7448 if msg.common_fields.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
7449 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.common_fields.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
7451 if msg.common_fields.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
7452 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.common_fields.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
7454 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
7455 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
7457 if msg.common_fields.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
7458 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.common_fields.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
7460 if msg.common_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
7461 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
7463 if msg.common_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
7464 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
7466 if msg.common_fields.minimum_depth > peer_limits.max_minimum_depth {
7467 return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.common_fields.minimum_depth)));
7470 if let Some(ty) = &msg.common_fields.channel_type {
7471 if *ty != self.context.channel_type {
7472 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
7474 } else if their_features.supports_channel_type() {
7475 // Assume they've accepted the channel type as they said they understand it.
7477 let channel_type = ChannelTypeFeatures::from_init(&their_features);
7478 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
7479 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
7481 self.context.channel_type = channel_type.clone();
7482 self.context.channel_transaction_parameters.channel_type_features = channel_type;
7485 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
7486 match &msg.common_fields.shutdown_scriptpubkey {
7487 &Some(ref script) => {
7488 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything here.
7489 if script.len() == 0 {
7492 if !script::is_bolt2_compliant(&script, their_features) {
7493 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
7495 Some(script.clone())
7498 // Peer is signaling upfront_shutdown but didn't opt out with the correct mechanism (i.e. a 0-length script). The peer looks buggy, so we fail the channel.
7500 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we didn't receive any script. Use a 0-length script to opt out".to_owned()));
7505 self.context.counterparty_dust_limit_satoshis = msg.common_fields.dust_limit_satoshis;
7506 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.common_fields.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
7507 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
7508 self.context.counterparty_htlc_minimum_msat = msg.common_fields.htlc_minimum_msat;
7509 self.context.counterparty_max_accepted_htlcs = msg.common_fields.max_accepted_htlcs;
7511 if peer_limits.trust_own_funding_0conf {
7512 self.context.minimum_depth = Some(msg.common_fields.minimum_depth);
7514 self.context.minimum_depth = Some(cmp::max(1, msg.common_fields.minimum_depth));
7517 let counterparty_pubkeys = ChannelPublicKeys {
7518 funding_pubkey: msg.common_fields.funding_pubkey,
7519 revocation_basepoint: RevocationBasepoint::from(msg.common_fields.revocation_basepoint),
7520 payment_point: msg.common_fields.payment_basepoint,
7521 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.common_fields.delayed_payment_basepoint),
7522 htlc_basepoint: HtlcBasepoint::from(msg.common_fields.htlc_basepoint)
7525 self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
7526 selected_contest_delay: msg.common_fields.to_self_delay,
7527 pubkeys: counterparty_pubkeys,
7530 self.context.counterparty_cur_commitment_point = Some(msg.common_fields.first_per_commitment_point);
7531 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
7533 self.context.channel_state = ChannelState::NegotiatingFunding(
7534 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
7536 self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
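// A condensed restatement of the reserve/dust sanity checks above as a single predicate
// (hypothetical helper; the real code also checks the user's `ChannelHandshakeLimits`):
// the peer's reserve must fit in the channel value net of our own reserve, and their dust
// limit must not exceed our reserve.
#[cfg(test)]
fn accept_channel_reserve_sketch() {
	fn reserve_checks_ok(channel_value_sat: u64, their_reserve_sat: u64,
		our_reserve_sat: u64, their_dust_limit_sat: u64) -> bool {
		match channel_value_sat.checked_sub(our_reserve_sat) {
			Some(rest) => their_reserve_sat <= rest && their_dust_limit_sat <= our_reserve_sat,
			None => false,
		}
	}
	assert!(reserve_checks_ok(100_000, 1_000, 1_000, 354));
	assert!(!reserve_checks_ok(100_000, 99_500, 1_000, 354));
}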
7541 /// Handles a funding_signed message from the remote end.
7542 /// If this call is successful, broadcast the funding transaction (and not before!)
7543 pub fn funding_signed<L: Deref>(
7544 mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
7545 ) -> Result<(Channel<SP>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (OutboundV1Channel<SP>, ChannelError)>
7549 if !self.context.is_outbound() {
7550 return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
7552 if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
7553 return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
7555 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7556 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7557 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7558 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7561 let funding_script = self.context.get_funding_redeemscript();
7563 let counterparty_keys = self.context.build_remote_transaction_keys();
7564 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
7565 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
7566 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
7568 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
7569 &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
7571 let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
7572 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
7574 let trusted_tx = initial_commitment_tx.trust();
7575 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
7576 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
7577 // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
7578 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
7579 return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));
7583 let holder_commitment_tx = HolderCommitmentTransaction::new(
7584 initial_commitment_tx,
7587 &self.context.get_holder_pubkeys().funding_pubkey,
7588 self.context.counterparty_funding_pubkey()
7592 let validated = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
7593 if validated.is_err() {
7594 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
7597 let funding_redeemscript = self.context.get_funding_redeemscript();
7598 let funding_txo = self.context.get_funding_txo().unwrap();
7599 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
7600 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
7601 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
7602 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
7603 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
7604 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
7605 shutdown_script, self.context.get_holder_selected_contest_delay(),
7606 &self.context.destination_script, (funding_txo, funding_txo_script),
7607 &self.context.channel_transaction_parameters,
7608 funding_redeemscript.clone(), self.context.channel_value_satoshis,
7610 holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id());
7611 channel_monitor.provide_initial_counterparty_commitment_tx(
7612 counterparty_initial_bitcoin_tx.txid, Vec::new(),
7613 self.context.cur_counterparty_commitment_transaction_number,
7614 self.context.counterparty_cur_commitment_point.unwrap(),
7615 counterparty_initial_commitment_tx.feerate_per_kw(),
7616 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7617 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
7619 assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have not had any monitors yet to fail an update!
7620 if self.context.is_batch_funding() {
7621 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
7623 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
7625 self.context.cur_holder_commitment_transaction_number -= 1;
7626 self.context.cur_counterparty_commitment_transaction_number -= 1;
7628 log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
7630 let mut channel = Channel {
7631 context: self.context,
7632 #[cfg(dual_funding)]
7633 dual_funding_channel_context: None,
7636 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
7637 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7638 Ok((channel, channel_monitor))
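// A sketch of the `1 << 48` sentinel asserted before funding above: commitment numbers are
// 48 bits, so a fresh secret store reports 2^48 as its minimum seen index, meaning no
// revocation secrets have been received yet (local constant, mirroring the checked value):
#[cfg(test)]
fn min_seen_secret_sketch() {
	// 2^48 is one past the largest 48-bit commitment index, used as "none seen yet".
	const NO_SECRETS_SEEN: u64 = 1 << 48;
	fn has_seen_any_secret(min_seen: u64) -> bool { min_seen != NO_SECRETS_SEEN }
	assert!(!has_seen_any_secret(NO_SECRETS_SEEN));
	assert!(has_seen_any_secret(NO_SECRETS_SEEN - 1));
}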
7641 /// Indicates that the signer may have some signatures for us, so we should retry if we're blocked.
7643 #[cfg(async_signing)]
7644 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
7645 if self.context.signer_pending_funding && self.context.is_outbound() {
7646 log_trace!(logger, "Signer unblocked a funding_created");
7647 self.get_funding_created_msg(logger)
7652 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
7653 pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
7654 pub context: ChannelContext<SP>,
7655 pub unfunded_context: UnfundedChannelContext,
7658 /// Fetches the [`ChannelTypeFeatures`] that will be used for a channel built from a given
7659 /// [`msgs::CommonOpenChannelFields`].
7660 pub(super) fn channel_type_from_open_channel(
7661 common_fields: &msgs::CommonOpenChannelFields, their_features: &InitFeatures,
7662 our_supported_features: &ChannelTypeFeatures
7663 ) -> Result<ChannelTypeFeatures, ChannelError> {
7664 if let Some(channel_type) = &common_fields.channel_type {
7665 if channel_type.supports_any_optional_bits() {
7666 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
7669 // We only support the channel types defined by the `ChannelManager` in
7670 // `provided_channel_type_features`. The channel type must always support
7671 // `static_remote_key`.
7672 if !channel_type.requires_static_remote_key() {
7673 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
7675 // Make sure we support all of the features behind the channel type.
7676 if !channel_type.is_subset(our_supported_features) {
7677 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
7679 let announced_channel = (common_fields.channel_flags & 1) == 1;
7680 if channel_type.requires_scid_privacy() && announced_channel {
7681 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
7683 Ok(channel_type.clone())
7685 let channel_type = ChannelTypeFeatures::from_init(&their_features);
7686 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
7687 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
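// The `is_subset` rule above, sketched over plain bitmasks (a hypothetical encoding of
// feature bits): a proposed type is acceptable only if every bit it sets is also in our
// supported set.
#[cfg(test)]
fn channel_type_subset_sketch() {
	fn type_is_subset(proposed_bits: u64, supported_bits: u64) -> bool {
		(proposed_bits & !supported_bits) == 0
	}
	let static_remote_key = 1 << 0; // hypothetical bit assignments
	let anchors = 1 << 1;
	assert!(type_is_subset(static_remote_key, static_remote_key | anchors));
	assert!(!type_is_subset(anchors, static_remote_key));
}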
7693 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
7694 /// Creates a new channel from a remote side's request for one.
7695 /// Assumes chain_hash has already been checked and corresponds with what we expect!
7696 pub fn new<ES: Deref, F: Deref, L: Deref>(
7697 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
7698 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
7699 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
7700 current_chain_height: u32, logger: &L, is_0conf: bool,
7701 ) -> Result<InboundV1Channel<SP>, ChannelError>
7702 where ES::Target: EntropySource,
7703 F::Target: FeeEstimator,
7706 let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.common_fields.temporary_channel_id));
7708 // First, check that the channel type is known, failing before we do anything else if we
7709 // don't support this channel type.
7710 let channel_type = channel_type_from_open_channel(&msg.common_fields, their_features, our_supported_features)?;
7712 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.common_fields.funding_satoshis, config);
7713 let counterparty_pubkeys = ChannelPublicKeys {
7714 funding_pubkey: msg.common_fields.funding_pubkey,
7715 revocation_basepoint: RevocationBasepoint::from(msg.common_fields.revocation_basepoint),
7716 payment_point: msg.common_fields.payment_basepoint,
7717 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.common_fields.delayed_payment_basepoint),
7718 htlc_basepoint: HtlcBasepoint::from(msg.common_fields.htlc_basepoint)
7722 context: ChannelContext::new_for_inbound_channel(
7726 counterparty_node_id,
7730 current_chain_height,
7735 counterparty_pubkeys,
7737 holder_selected_channel_reserve_satoshis,
7738 msg.channel_reserve_satoshis,
7740 msg.common_fields.clone(),
7742 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
7747 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
7748 /// should be sent back to the counterparty node.
7750 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7751 pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
7752 if self.context.is_outbound() {
7753 panic!("Tried to send accept_channel for an outbound channel?");
7756 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7757 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7759 panic!("Tried to send accept_channel after channel had moved forward");
7761 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7762 panic!("Tried to send an accept_channel for a channel that has already advanced");
7765 self.generate_accept_channel_message()
7768 /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
7769 /// inbound channel. If the intention is to accept an inbound channel, use
7770 /// [`InboundV1Channel::accept_inbound_channel`] instead.
7772 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7773 fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
7774 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
7775 let keys = self.context.get_holder_pubkeys();
7777 msgs::AcceptChannel {
7778 common_fields: msgs::CommonAcceptChannelFields {
7779 temporary_channel_id: self.context.channel_id,
7780 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
7781 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
7782 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
7783 minimum_depth: self.context.minimum_depth.unwrap(),
7784 to_self_delay: self.context.get_holder_selected_contest_delay(),
7785 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
7786 funding_pubkey: keys.funding_pubkey,
7787 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
7788 payment_basepoint: keys.payment_point,
7789 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
7790 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
7791 first_per_commitment_point,
7792 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
7793 Some(script) => script.clone().into_inner(),
7794 None => Builder::new().into_script(),
7796 channel_type: Some(self.context.channel_type.clone()),
7798 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
7800 next_local_nonce: None,
7804 /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
7805 /// inbound channel without accepting it.
7807 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7809 pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
7810 self.generate_accept_channel_message()
7813 fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
7814 let funding_script = self.context.get_funding_redeemscript();
7816 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
7817 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
7818 let trusted_tx = initial_commitment_tx.trust();
7819 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
7820 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
7821 // They sign the holder commitment transaction...
7822 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
7823 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
7824 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
7825 encode::serialize_hex(&funding_script), &self.context.channel_id());
7826 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
7828 Ok(initial_commitment_tx)
7831 pub fn funding_created<L: Deref>(
7832 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
7833 ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
7837 if self.context.is_outbound() {
7838 return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
7841 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7842 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7844 // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
7845 // remember the channel, so it's safe to just send an error_message here and drop the channel.
7847 return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
7849 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7850 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7851 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7852 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7855 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
7856 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
7857 // This is an externally observable change before we finish all our checks. In particular
7858 // check_funding_created_signature may fail.
7859 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
7861 let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
7863 Err(ChannelError::Close(e)) => {
7864 self.context.channel_transaction_parameters.funding_outpoint = None;
7865 return Err((self, ChannelError::Close(e)));
7868 // The only error we know how to handle is ChannelError::Close, so we fall over here
7869 // to make sure we don't continue with an inconsistent state.
7870 panic!("unexpected error type from check_funding_created_signature {:?}", e);
7874 let holder_commitment_tx = HolderCommitmentTransaction::new(
7875 initial_commitment_tx,
7878 &self.context.get_holder_pubkeys().funding_pubkey,
7879 self.context.counterparty_funding_pubkey()
7882 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
7883 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
7886 // Now that we're past error-generating stuff, update our local state:
7888 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
7889 self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
7890 self.context.cur_counterparty_commitment_transaction_number -= 1;
7891 self.context.cur_holder_commitment_transaction_number -= 1;
7893 let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
7895 let funding_redeemscript = self.context.get_funding_redeemscript();
7896 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
7897 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
7898 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
7899 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
7900 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
7901 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
7902 shutdown_script, self.context.get_holder_selected_contest_delay(),
7903 &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
7904 &self.context.channel_transaction_parameters,
7905 funding_redeemscript.clone(), self.context.channel_value_satoshis,
7907 holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id());
7908 channel_monitor.provide_initial_counterparty_commitment_tx(
7909 counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
7910 self.context.cur_counterparty_commitment_transaction_number + 1,
7911 self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
7912 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7913 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
7915 log_info!(logger, "{} funding_signed for peer for channel {}",
7916 if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
7918 // Promote the channel to a full-fledged one now that we have updated the state and have a
7919 // `ChannelMonitor`.
7920 let mut channel = Channel {
7921 context: self.context,
7922 #[cfg(dual_funding)]
7923 dual_funding_channel_context: None,
7925 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
7926 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7928 Ok((channel, funding_signed, channel_monitor))
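// A hedged sketch of the BOLT 3 obscure-factor computation used for the monitor above: the
// lower 48 bits of SHA256(funder_payment_basepoint || fundee_payment_basepoint). This
// assumes `bitcoin::hashes::{Hash, HashEngine}` are available; the real helper is
// `chan_utils::get_commitment_transaction_number_obscure_factor`, imported at the top of
// this file.
#[cfg(test)]
fn obscure_factor_sketch() {
	use bitcoin::hashes::{Hash, HashEngine};
	use bitcoin::hashes::sha256::Hash as Sha256;
	// The funder's payment basepoint is hashed first, per BOLT 3.
	fn lower_48_bits_of_hash(funder_point: &[u8; 33], fundee_point: &[u8; 33]) -> u64 {
		let mut sha = Sha256::engine();
		sha.input(funder_point);
		sha.input(fundee_point);
		let res = Sha256::from_engine(sha).to_byte_array();
		((res[26] as u64) << 40) | ((res[27] as u64) << 32) | ((res[28] as u64) << 24)
			| ((res[29] as u64) << 16) | ((res[30] as u64) << 8) | (res[31] as u64)
	}
	let factor = lower_48_bits_of_hash(&[2; 33], &[3; 33]);
	assert!(factor < (1 << 48));
}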
7932 /// A not-yet-funded outbound (from holder) channel using V2 channel establishment.
7933 #[cfg(dual_funding)]
7934 pub(super) struct OutboundV2Channel<SP: Deref> where SP::Target: SignerProvider {
7935 pub context: ChannelContext<SP>,
7936 pub unfunded_context: UnfundedChannelContext,
7937 #[cfg(dual_funding)]
7938 pub dual_funding_context: DualFundingChannelContext,
7941 #[cfg(dual_funding)]
7942 impl<SP: Deref> OutboundV2Channel<SP> where SP::Target: SignerProvider {
7943 pub fn new<ES: Deref, F: Deref>(
7944 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
7945 counterparty_node_id: PublicKey, their_features: &InitFeatures, funding_satoshis: u64,
7946 user_id: u128, config: &UserConfig, current_chain_height: u32, outbound_scid_alias: u64,
7947 funding_confirmation_target: ConfirmationTarget,
7948 ) -> Result<OutboundV2Channel<SP>, APIError>
7949 where ES::Target: EntropySource,
7950 F::Target: FeeEstimator,
7952 let channel_keys_id = signer_provider.generate_channel_keys_id(false, funding_satoshis, user_id);
7953 let holder_signer = signer_provider.derive_channel_signer(funding_satoshis, channel_keys_id);
7954 let pubkeys = holder_signer.pubkeys().clone();
7956 let temporary_channel_id = Some(ChannelId::temporary_v2_from_revocation_basepoint(&pubkeys.revocation_basepoint));
7958 let holder_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis(
7959 funding_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
7961 let funding_feerate_sat_per_1000_weight = fee_estimator.bounded_sat_per_1000_weight(funding_confirmation_target);
7962 let funding_tx_locktime = current_chain_height;
7965 context: ChannelContext::new_for_outbound_channel(
7969 counterparty_node_id,
7975 current_chain_height,
7976 outbound_scid_alias,
7977 temporary_channel_id,
7978 holder_selected_channel_reserve_satoshis,
7983 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 },
7984 dual_funding_context: DualFundingChannelContext {
7985 our_funding_satoshis: funding_satoshis,
7986 their_funding_satoshis: 0,
7987 funding_tx_locktime,
7988 funding_feerate_sat_per_1000_weight,
7994 /// If we receive an error message, it may only be a rejection of the channel type we tried,
7995 /// not of our ability to open any channel at all. Thus, on error, we should first call this
7996 /// and see if we get a new `OpenChannelV2` message, otherwise the channel is failed.
7997 pub(crate) fn maybe_handle_error_without_close<F: Deref>(
7998 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
7999 ) -> Result<msgs::OpenChannelV2, ()>
8001 F::Target: FeeEstimator
8003 self.context.maybe_downgrade_channel_features(fee_estimator)?;
8004 Ok(self.get_open_channel_v2(chain_hash))
8007 pub fn get_open_channel_v2(&self, chain_hash: ChainHash) -> msgs::OpenChannelV2 {
8008 if self.context.have_received_message() {
8009 debug_assert!(false, "Cannot generate an open_channel2 after we've moved forward");
8012 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
8013 debug_assert!(false, "Tried to send an open_channel2 for a channel that has already advanced");
8016 let first_per_commitment_point = self.context.holder_signer.as_ref()
8017 .get_per_commitment_point(self.context.cur_holder_commitment_transaction_number,
8018 &self.context.secp_ctx);
8019 let second_per_commitment_point = self.context.holder_signer.as_ref()
8020 .get_per_commitment_point(self.context.cur_holder_commitment_transaction_number - 1,
8021 &self.context.secp_ctx);
8022 let keys = self.context.get_holder_pubkeys();
8024 msgs::OpenChannelV2 {
8025 common_fields: msgs::CommonOpenChannelFields {
8027 temporary_channel_id: self.context.temporary_channel_id.unwrap(),
8028 funding_satoshis: self.context.channel_value_satoshis,
8029 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
8030 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
8031 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
8032 commitment_feerate_sat_per_1000_weight: self.context.feerate_per_kw,
8033 to_self_delay: self.context.get_holder_selected_contest_delay(),
8034 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
8035 funding_pubkey: keys.funding_pubkey,
8036 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
8037 payment_basepoint: keys.payment_point,
8038 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
8039 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
8040 first_per_commitment_point,
8041 channel_flags: if self.context.config.announced_channel {1} else {0},
8042 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
8043 Some(script) => script.clone().into_inner(),
8044 None => Builder::new().into_script(),
8046 channel_type: Some(self.context.channel_type.clone()),
8048 funding_feerate_sat_per_1000_weight: self.context.feerate_per_kw,
8049 second_per_commitment_point,
8050 locktime: self.dual_funding_context.funding_tx_locktime,
8051 require_confirmed_inputs: None,
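// Both the first and second per-commitment points are sent up front here, whereas V1 only
// delivers the second one later (in `channel_ready`). Commitment numbers count down from
// 2^48 - 1, as sketched (local constant, mirroring `INITIAL_COMMITMENT_NUMBER`):
#[cfg(test)]
fn v2_commitment_point_indices_sketch() {
	const INITIAL: u64 = (1 << 48) - 1;
	let (first, second) = (INITIAL, INITIAL - 1);
	assert_eq!(first - second, 1); // the "second" point is for the next commitment
}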
8056 /// A not-yet-funded inbound (from counterparty) channel using V2 channel establishment.
8057 #[cfg(dual_funding)]
8058 pub(super) struct InboundV2Channel<SP: Deref> where SP::Target: SignerProvider {
8059 pub context: ChannelContext<SP>,
8060 pub unfunded_context: UnfundedChannelContext,
8061 pub dual_funding_context: DualFundingChannelContext,
8064 #[cfg(dual_funding)]
8065 impl<SP: Deref> InboundV2Channel<SP> where SP::Target: SignerProvider {
8066 /// Creates a new dual-funded channel from a remote side's request for one.
8067 /// Assumes chain_hash has already been checked and corresponds with what we expect!
8068 pub fn new<ES: Deref, F: Deref, L: Deref>(
8069 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
8070 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
8071 their_features: &InitFeatures, msg: &msgs::OpenChannelV2, funding_satoshis: u64, user_id: u128,
8072 config: &UserConfig, current_chain_height: u32, logger: &L,
8073 ) -> Result<InboundV2Channel<SP>, ChannelError>
8074 where ES::Target: EntropySource,
8075 F::Target: FeeEstimator,
8078 let channel_value_satoshis = funding_satoshis.saturating_add(msg.common_fields.funding_satoshis);
8079 let counterparty_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis(
8080 channel_value_satoshis, msg.common_fields.dust_limit_satoshis);
8081 let holder_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis(
8082 channel_value_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
8084 // First, check that the channel type is known, failing before we do anything else if we
8085 // don't support this channel type.
8086 if msg.common_fields.channel_type.is_none() {
8087 return Err(ChannelError::Close(format!("Rejecting V2 channel {} missing channel_type",
8088 msg.common_fields.temporary_channel_id)))
8090 let channel_type = channel_type_from_open_channel(&msg.common_fields, their_features, our_supported_features)?;
8092 let counterparty_pubkeys = ChannelPublicKeys {
8093 funding_pubkey: msg.common_fields.funding_pubkey,
8094 revocation_basepoint: RevocationBasepoint(msg.common_fields.revocation_basepoint),
8095 payment_point: msg.common_fields.payment_basepoint,
8096 delayed_payment_basepoint: DelayedPaymentBasepoint(msg.common_fields.delayed_payment_basepoint),
8097 htlc_basepoint: HtlcBasepoint(msg.common_fields.htlc_basepoint)
8100 let mut context = ChannelContext::new_for_inbound_channel(
8104 counterparty_node_id,
8108 current_chain_height,
8114 counterparty_pubkeys,
8116 holder_selected_channel_reserve_satoshis,
8117 counterparty_selected_channel_reserve_satoshis,
8118 0 /* push_msat not used in dual-funding */,
8119 msg.common_fields.clone(),
8121 let channel_id = ChannelId::v2_from_revocation_basepoints(
8122 &context.get_holder_pubkeys().revocation_basepoint,
8123 &context.get_counterparty_pubkeys().revocation_basepoint);
8124 context.channel_id = channel_id;
8128 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 },
8129 dual_funding_context: DualFundingChannelContext {
8130 our_funding_satoshis: funding_satoshis,
8131 their_funding_satoshis: msg.common_fields.funding_satoshis,
8132 funding_tx_locktime: msg.locktime,
8133 funding_feerate_sat_per_1000_weight: msg.funding_feerate_sat_per_1000_weight,
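// The dual-funded channel value computed above, restated: both sides contribute inputs, so
// the total is the saturating sum of the two `funding_satoshis` contributions.
#[cfg(test)]
fn v2_channel_value_sketch() {
	fn v2_channel_value_sat(ours: u64, theirs: u64) -> u64 { ours.saturating_add(theirs) }
	assert_eq!(v2_channel_value_sat(80_000, 20_000), 100_000);
	assert_eq!(v2_channel_value_sat(u64::MAX, 1), u64::MAX); // saturates, never wraps
}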
8140 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannelV2`] message which
8141 /// should be sent back to the counterparty node.
8143 /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2
8144 pub fn accept_inbound_dual_funded_channel(&mut self) -> msgs::AcceptChannelV2 {
8145 if self.context.is_outbound() {
8146 debug_assert!(false, "Tried to send accept_channel for an outbound channel?");
8149 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
8150 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
8152 debug_assert!(false, "Tried to send accept_channel2 after channel had moved forward");
8154 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
8155 debug_assert!(false, "Tried to send an accept_channel2 for a channel that has already advanced");
8158 self.generate_accept_channel_v2_message()
8161 /// This function is used to explicitly generate a [`msgs::AcceptChannelV2`] message for an
8162 /// inbound channel. If the intention is to accept an inbound channel, use
8163 /// [`InboundV2Channel::accept_inbound_dual_funded_channel`] instead.
8165 /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2
8166 fn generate_accept_channel_v2_message(&self) -> msgs::AcceptChannelV2 {
8167 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(
8168 self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
8169 let second_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(
8170 self.context.cur_holder_commitment_transaction_number - 1, &self.context.secp_ctx);
8171 let keys = self.context.get_holder_pubkeys();
8173 msgs::AcceptChannelV2 {
8174 common_fields: msgs::CommonAcceptChannelFields {
8175 temporary_channel_id: self.context.temporary_channel_id.unwrap(),
8176 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
8177 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
8178 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
8179 minimum_depth: self.context.minimum_depth.unwrap(),
8180 to_self_delay: self.context.get_holder_selected_contest_delay(),
8181 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
8182 funding_pubkey: keys.funding_pubkey,
8183 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
8184 payment_basepoint: keys.payment_point,
8185 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
8186 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
8187 first_per_commitment_point,
8188 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
8189 Some(script) => script.clone().into_inner(),
8190 None => Builder::new().into_script(),
8192 channel_type: Some(self.context.channel_type.clone()),
8194 funding_satoshis: self.dual_funding_context.our_funding_satoshis,
8195 second_per_commitment_point,
8196 require_confirmed_inputs: None,
8200 /// Enables the possibility for tests to extract a [`msgs::AcceptChannelV2`] message for an
8201 /// inbound channel without accepting it.
8203 /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2
8205 pub fn get_accept_channel_v2_message(&self) -> msgs::AcceptChannelV2 {
8206 self.generate_accept_channel_v2_message()
8210 // Unfunded channel utilities
8212 fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
8213 // The default channel type (ie the first one we try) depends on whether the channel is
8214 // public - if it is, we just go with `only_static_remotekey` as it's the only option
8215 // available. If it's private, we first try `scid_privacy` as it provides better privacy
8216 // with no other changes, and fall back to `only_static_remotekey`.
8217 let mut ret = ChannelTypeFeatures::only_static_remote_key();
8218 if !config.channel_handshake_config.announced_channel &&
8219 config.channel_handshake_config.negotiate_scid_privacy &&
8220 their_features.supports_scid_privacy() {
8221 ret.set_scid_privacy_required();
8224 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
8225 // set it now. If they don't understand it, we'll fall back to our default of
8226 // `only_static_remotekey`.
8227 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
8228 their_features.supports_anchors_zero_fee_htlc_tx() {
8229 ret.set_anchors_zero_fee_htlc_tx_required();
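// A compact restatement of the selection above (hypothetical helper): each optional
// feature is layered on top of `static_remote_key` only when our config asks for it and
// the peer advertises support.
#[cfg(test)]
fn initial_channel_type_sketch() {
	fn optional_feature_bits(public: bool, want_scid_privacy: bool, peer_scid_privacy: bool,
		want_anchors: bool, peer_anchors: bool) -> (bool, bool) {
		let scid_privacy = !public && want_scid_privacy && peer_scid_privacy;
		let anchors = want_anchors && peer_anchors;
		(scid_privacy, anchors)
	}
	// Public channels never request scid_privacy, even if configured.
	assert_eq!(optional_feature_bits(true, true, true, false, false), (false, false));
	assert_eq!(optional_feature_bits(false, true, true, true, true), (true, true));
}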
8235 const SERIALIZATION_VERSION: u8 = 3;
8236 const MIN_SERIALIZATION_VERSION: u8 = 3;
8238 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
8244 impl Writeable for ChannelUpdateStatus {
8245 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
8246 // We only care about writing out the current state as it was announced, ie only either
8247 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
8248 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
8250 ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
8251 ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
8252 ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
8253 ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
8259 impl Readable for ChannelUpdateStatus {
8260 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
8261 Ok(match <u8 as Readable>::read(reader)? {
8262 0 => ChannelUpdateStatus::Enabled,
8263 1 => ChannelUpdateStatus::Disabled,
8264 _ => return Err(DecodeError::InvalidValue),
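// As the comment above notes, serialization intentionally collapses the staged states back
// to the last announced one; a write/read round trip maps `DisabledStaged` to `Enabled`
// and `EnabledStaged` to `Disabled`. Sketched over a stand-in enum:
#[cfg(test)]
fn channel_update_status_roundtrip_sketch() {
	#[derive(Debug, PartialEq)]
	enum Status { Enabled, DisabledStaged, EnabledStaged, Disabled }
	fn write_byte(s: &Status) -> u8 {
		match s { Status::Enabled | Status::DisabledStaged => 0, _ => 1 }
	}
	fn read_byte(b: u8) -> Status { if b == 0 { Status::Enabled } else { Status::Disabled } }
	assert_eq!(read_byte(write_byte(&Status::DisabledStaged)), Status::Enabled);
	assert_eq!(read_byte(write_byte(&Status::EnabledStaged)), Status::Disabled);
}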
8269 impl Writeable for AnnouncementSigsState {
8270 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
8271 // We only care about writing out the current state as if we had just disconnected, at
8272 // which point we always set anything but `PeerReceived` to `NotSent`.
8274 AnnouncementSigsState::NotSent => 0u8.write(writer),
8275 AnnouncementSigsState::MessageSent => 0u8.write(writer),
8276 AnnouncementSigsState::Committed => 0u8.write(writer),
8277 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
8282 impl Readable for AnnouncementSigsState {
8283 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
8284 Ok(match <u8 as Readable>::read(reader)? {
8285 0 => AnnouncementSigsState::NotSent,
8286 1 => AnnouncementSigsState::PeerReceived,
8287 _ => return Err(DecodeError::InvalidValue),
8292 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
8293 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
8294 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been called.
8297 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
8299 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
8300 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
8301 // the low bytes now and the optional high bytes later.
8302 let user_id_low = self.context.user_id as u64;
8303 user_id_low.write(writer)?;
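// The u128 split described above, as arithmetic: the low 64 bits go in the fixed fields
// for pre-0.0.113 readers, the high 64 bits ride in an optional TLV, and new readers
// reassemble the full value. A worked sketch:
#[cfg(test)]
fn user_id_split_sketch() {
	fn split(user_id: u128) -> (u64, u64) { (user_id as u64, (user_id >> 64) as u64) }
	fn join(low: u64, high: u64) -> u128 { ((high as u128) << 64) | low as u128 }
	let user_id: u128 = 0x0123_4567_89ab_cdef_0011_2233_4455_6677;
	let (low, high) = split(user_id);
	assert_eq!(join(low, high), user_id);
}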
8305 // Version 1 deserializers expected to read parts of the config object here. Version 2
8306 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
8307 // `minimum_depth` we simply write dummy values here.
8308 writer.write_all(&[0; 8])?;
8310 self.context.channel_id.write(writer)?;
8312 let mut channel_state = self.context.channel_state;
8313 if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
8314 channel_state.set_peer_disconnected();
8316 debug_assert!(false, "Pre-funded/shutdown channels should not be written");
8318 channel_state.to_u32().write(writer)?;
8320 self.context.channel_value_satoshis.write(writer)?;
8322 self.context.latest_monitor_update_id.write(writer)?;
8324 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
8325 // deserialized from that format.
8326 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
8327 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
8328 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
8330 self.context.destination_script.write(writer)?;
8332 self.context.cur_holder_commitment_transaction_number.write(writer)?;
8333 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
8334 self.context.value_to_self_msat.write(writer)?;
8336 let mut dropped_inbound_htlcs = 0;
8337 for htlc in self.context.pending_inbound_htlcs.iter() {
8338 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
8339 dropped_inbound_htlcs += 1;
8342 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
8343 for htlc in self.context.pending_inbound_htlcs.iter() {
8344 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
8347 htlc.htlc_id.write(writer)?;
8348 htlc.amount_msat.write(writer)?;
8349 htlc.cltv_expiry.write(writer)?;
8350 htlc.payment_hash.write(writer)?;
8352 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
8353 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
8355 htlc_state.write(writer)?;
8357 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
8359 htlc_state.write(writer)?;
8361 &InboundHTLCState::Committed => {
8364 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
8366 removal_reason.write(writer)?;
8371 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
8372 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
8373 let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
8375 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
8376 for htlc in self.context.pending_outbound_htlcs.iter() {
8377 htlc.htlc_id.write(writer)?;
8378 htlc.amount_msat.write(writer)?;
8379 htlc.cltv_expiry.write(writer)?;
8380 htlc.payment_hash.write(writer)?;
8381 htlc.source.write(writer)?;
8383 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
8385 onion_packet.write(writer)?;
8387 &OutboundHTLCState::Committed => {
8390 &OutboundHTLCState::RemoteRemoved(_) => {
8391 // Treat this as a Committed because we haven't received the CS - they'll
8392 // resend the claim/fail on reconnect, as well as (hopefully) the missing CS.
8395 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
8397 if let OutboundHTLCOutcome::Success(preimage) = outcome {
8398 preimages.push(preimage);
8400 let reason: Option<&HTLCFailReason> = outcome.into();
8401 reason.write(writer)?;
8403 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
8405 if let OutboundHTLCOutcome::Success(preimage) = outcome {
8406 preimages.push(preimage);
8408 let reason: Option<&HTLCFailReason> = outcome.into();
8409 reason.write(writer)?;
8412 pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat);
8413 pending_outbound_blinding_points.push(htlc.blinding_point);
8416 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
8417 let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
8418 // Vec of (htlc_id, failure_code, sha256_of_onion)
8419 let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new();
(self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
for update in self.context.holding_cell_htlc_updates.iter() {
match update {
&HTLCUpdateAwaitingACK::AddHTLC {
ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
blinding_point, skimmed_fee_msat,
} => {
0u8.write(writer)?;
amount_msat.write(writer)?;
cltv_expiry.write(writer)?;
payment_hash.write(writer)?;
source.write(writer)?;
onion_routing_packet.write(writer)?;

holding_cell_skimmed_fees.push(skimmed_fee_msat);
holding_cell_blinding_points.push(blinding_point);
},
&HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
1u8.write(writer)?;
payment_preimage.write(writer)?;
htlc_id.write(writer)?;
},
&HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
2u8.write(writer)?;
htlc_id.write(writer)?;
err_packet.write(writer)?;
},
&HTLCUpdateAwaitingACK::FailMalformedHTLC {
htlc_id, failure_code, sha256_of_onion
} => {
// We don't want to break downgrading by adding a new variant, so write a dummy
// `::FailHTLC` variant and write the real malformed error as an optional TLV.
malformed_htlcs.push((htlc_id, failure_code, sha256_of_onion));

let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
2u8.write(writer)?;
htlc_id.write(writer)?;
dummy_err_packet.write(writer)?;
},
}
}
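// On the read side, the `malformed_htlcs` TLV (type 43) written below is used to convert
// the dummy `FailHTLC` entries back into `FailMalformedHTLC` updates, so the round trip is
// lossless, while older versions simply see a `FailHTLC` with an empty error packet.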
match self.context.resend_order {
RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
}

self.context.monitor_pending_channel_ready.write(writer)?;
self.context.monitor_pending_revoke_and_ack.write(writer)?;
self.context.monitor_pending_commitment_signed.write(writer)?;

(self.context.monitor_pending_forwards.len() as u64).write(writer)?;
for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
pending_forward.write(writer)?;
htlc_id.write(writer)?;
}

(self.context.monitor_pending_failures.len() as u64).write(writer)?;
for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
htlc_source.write(writer)?;
payment_hash.write(writer)?;
fail_reason.write(writer)?;
}

if self.context.is_outbound() {
self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
} else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
Some(feerate).write(writer)?;
} else {
// As for inbound HTLCs, if the update was only announced and never committed in a
// commitment_signed, drop it.
None::<u32>.write(writer)?;
}
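// Note the asymmetry: on an outbound channel any pending fee update is one we initiated
// and is always rewritten, while on an inbound channel a fee update the counterparty only
// announced is dropped (they will re-send it on reconnect), mirroring the treatment of
// `RemoteAnnounced` inbound HTLCs above.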
self.context.holding_cell_update_fee.write(writer)?;

self.context.next_holder_htlc_id.write(writer)?;
(self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
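// `dropped_inbound_htlcs` (computed earlier in this function) counts inbound HTLCs still in
// `RemoteAnnounced` state, which are skipped during serialization; subtracting it keeps the
// next counterparty HTLC id consistent with the set of HTLCs actually written.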
self.context.update_time_counter.write(writer)?;
self.context.feerate_per_kw.write(writer)?;

// Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
// however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
// `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
// consider the stale state on reload.
0u8.write(writer)?;
self.context.funding_tx_confirmed_in.write(writer)?;
self.context.funding_tx_confirmation_height.write(writer)?;
self.context.short_channel_id.write(writer)?;

self.context.counterparty_dust_limit_satoshis.write(writer)?;
self.context.holder_dust_limit_satoshis.write(writer)?;
self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;

// Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;

self.context.counterparty_htlc_minimum_msat.write(writer)?;
self.context.holder_htlc_minimum_msat.write(writer)?;
self.context.counterparty_max_accepted_htlcs.write(writer)?;

// Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
self.context.minimum_depth.unwrap_or(0).write(writer)?;
match &self.context.counterparty_forwarding_info {
Some(info) => {
1u8.write(writer)?;
info.fee_base_msat.write(writer)?;
info.fee_proportional_millionths.write(writer)?;
info.cltv_expiry_delta.write(writer)?;
},
None => 0u8.write(writer)?
}

self.context.channel_transaction_parameters.write(writer)?;
self.context.funding_transaction.write(writer)?;

self.context.counterparty_cur_commitment_point.write(writer)?;
self.context.counterparty_prev_commitment_point.write(writer)?;
self.context.counterparty_node_id.write(writer)?;

self.context.counterparty_shutdown_scriptpubkey.write(writer)?;

self.context.commitment_secrets.write(writer)?;

self.context.channel_update_status.write(writer)?;

#[cfg(any(test, fuzzing))]
(self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
#[cfg(any(test, fuzzing))]
for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
htlc.write(writer)?;
}
// If the channel type is something other than only-static-remote-key, then we need to have
// older clients fail to deserialize this channel at all. If the type is
// only-static-remote-key, we simply consider it "default" and don't write the channel type
// at all.
let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
Some(&self.context.channel_type) } else { None };

// The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
// the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
// a different percentage of the channel value than 10%, which older versions of LDK used
// to set it to before the percentage was made configurable.
let serialized_holder_selected_reserve =
if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
{ Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };

let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
let serialized_holder_htlc_max_in_flight =
if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
{ Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };

let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);

// `user_id` used to be a single u64 value. In order to remain backwards compatible with
// versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
// we write the high bytes as an option here.
let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
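// For example, a `user_id` of 0x0123_4567_89ab_cdef_0011_2233_4455_6677 is split into the
// low half 0x0011_2233_4455_6677 (written with the fixed fields) and the high half
// 0x0123_4567_89ab_cdef here; the read side recombines them as
// `(high as u128) << 64 | low as u128`.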
let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };

write_tlv_fields!(writer, {
(0, self.context.announcement_sigs, option),
// minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
// default value instead of being Option<>al. Thus, to maintain compatibility we write
// them twice, once with their original default values above, and once as an option
// here. On the read side, old versions will simply ignore the odd-type entries here,
// and new versions map the default values to None and allow the TLV entries here to
// override that.
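// (As usual for TLV streams, odd-typed entries may be safely ignored by readers that do
// not understand them, while unknown even-typed entries cause deserialization to fail.)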
(1, self.context.minimum_depth, option),
(2, chan_type, option),
(3, self.context.counterparty_selected_channel_reserve_satoshis, option),
(4, serialized_holder_selected_reserve, option),
(5, self.context.config, required),
(6, serialized_holder_htlc_max_in_flight, option),
(7, self.context.shutdown_scriptpubkey, option),
(8, self.context.blocked_monitor_updates, optional_vec),
(9, self.context.target_closing_feerate_sats_per_kw, option),
(11, self.context.monitor_pending_finalized_fulfills, required_vec),
(13, self.context.channel_creation_height, required),
(15, preimages, required_vec),
(17, self.context.announcement_sigs_state, required),
(19, self.context.latest_inbound_scid_alias, option),
(21, self.context.outbound_scid_alias, required),
(23, channel_ready_event_emitted, option),
(25, user_id_high_opt, option),
(27, self.context.channel_keys_id, required),
(28, holder_max_accepted_htlcs, option),
(29, self.context.temporary_channel_id, option),
(31, channel_pending_event_emitted, option),
(35, pending_outbound_skimmed_fees, optional_vec),
(37, holding_cell_skimmed_fees, optional_vec),
(38, self.context.is_batch_funding, option),
(39, pending_outbound_blinding_points, optional_vec),
(41, holding_cell_blinding_points, optional_vec),
(43, malformed_htlcs, optional_vec), // Added in 0.0.119
(45, self.context.local_initiated_shutdown, option), // Added in 0.0.122
});

Ok(())
}
}
const MAX_ALLOC_SIZE: usize = 64*1024;

impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
where
ES::Target: EntropySource,
SP::Target: SignerProvider
{
fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
// `user_id` used to be a single u64 value. In order to remain backwards compatible with
// versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
// the low bytes now and the high bytes later.
let user_id_low: u64 = Readable::read(reader)?;

let mut config = Some(LegacyChannelConfig::default());
if ver == 1 {
// Read the old serialization of the ChannelConfig from version 0.0.98.
config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
} else {
// Read the 8 bytes of backwards-compatibility ChannelConfig data.
let mut _val: u64 = Readable::read(reader)?;
}
let channel_id = Readable::read(reader)?;
let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?;
let channel_value_satoshis = Readable::read(reader)?;

let latest_monitor_update_id = Readable::read(reader)?;
let mut keys_data = None;
if ver <= 2 {
// Read the serialize signer bytes. We'll choose to deserialize them or not based on whether
// the `channel_keys_id` TLV is present below.
let keys_len: u32 = Readable::read(reader)?;
keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
while keys_data.as_ref().unwrap().len() != keys_len as usize {
// Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
let mut data = [0; 1024];
let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
reader.read_exact(read_slice)?;
keys_data.as_mut().unwrap().extend_from_slice(read_slice);
}
}
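// Capping the initial allocation at MAX_ALLOC_SIZE and then growing in 1KB chunks means a
// corrupted or malicious `keys_len` cannot trigger one huge allocation before we discover
// that the stream is actually short.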
// Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
Err(_) => None,
};
let destination_script = Readable::read(reader)?;

let cur_holder_commitment_transaction_number = Readable::read(reader)?;
let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
let value_to_self_msat = Readable::read(reader)?;
let pending_inbound_htlc_count: u64 = Readable::read(reader)?;

let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
for _ in 0..pending_inbound_htlc_count {
pending_inbound_htlcs.push(InboundHTLCOutput {
htlc_id: Readable::read(reader)?,
amount_msat: Readable::read(reader)?,
cltv_expiry: Readable::read(reader)?,
payment_hash: Readable::read(reader)?,
state: match <u8 as Readable>::read(reader)? {
1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
3 => InboundHTLCState::Committed,
4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
_ => return Err(DecodeError::InvalidValue),
},
});
}
let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
for _ in 0..pending_outbound_htlc_count {
pending_outbound_htlcs.push(OutboundHTLCOutput {
htlc_id: Readable::read(reader)?,
amount_msat: Readable::read(reader)?,
cltv_expiry: Readable::read(reader)?,
payment_hash: Readable::read(reader)?,
source: Readable::read(reader)?,
state: match <u8 as Readable>::read(reader)? {
0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
1 => OutboundHTLCState::Committed,
2 => {
let option: Option<HTLCFailReason> = Readable::read(reader)?;
OutboundHTLCState::RemoteRemoved(option.into())
},
3 => {
let option: Option<HTLCFailReason> = Readable::read(reader)?;
OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
},
4 => {
let option: Option<HTLCFailReason> = Readable::read(reader)?;
OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
},
_ => return Err(DecodeError::InvalidValue),
},
skimmed_fee_msat: None,
blinding_point: None,
});
}
let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
for _ in 0..holding_cell_htlc_update_count {
holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
0 => HTLCUpdateAwaitingACK::AddHTLC {
amount_msat: Readable::read(reader)?,
cltv_expiry: Readable::read(reader)?,
payment_hash: Readable::read(reader)?,
source: Readable::read(reader)?,
onion_routing_packet: Readable::read(reader)?,
skimmed_fee_msat: None,
blinding_point: None,
},
1 => HTLCUpdateAwaitingACK::ClaimHTLC {
payment_preimage: Readable::read(reader)?,
htlc_id: Readable::read(reader)?,
},
2 => HTLCUpdateAwaitingACK::FailHTLC {
htlc_id: Readable::read(reader)?,
err_packet: Readable::read(reader)?,
},
_ => return Err(DecodeError::InvalidValue),
});
}
let resend_order = match <u8 as Readable>::read(reader)? {
0 => RAACommitmentOrder::CommitmentFirst,
1 => RAACommitmentOrder::RevokeAndACKFirst,
_ => return Err(DecodeError::InvalidValue),
};

let monitor_pending_channel_ready = Readable::read(reader)?;
let monitor_pending_revoke_and_ack = Readable::read(reader)?;
let monitor_pending_commitment_signed = Readable::read(reader)?;

let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
for _ in 0..monitor_pending_forwards_count {
monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
}

let monitor_pending_failures_count: u64 = Readable::read(reader)?;
let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
for _ in 0..monitor_pending_failures_count {
monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
}
let pending_update_fee_value: Option<u32> = Readable::read(reader)?;

let holding_cell_update_fee = Readable::read(reader)?;

let next_holder_htlc_id = Readable::read(reader)?;
let next_counterparty_htlc_id = Readable::read(reader)?;
let update_time_counter = Readable::read(reader)?;
let feerate_per_kw = Readable::read(reader)?;
// Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
// however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
// `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
// consider the stale state on reload.
match <u8 as Readable>::read(reader)? {
0 => {},
1 => {
let _: u32 = Readable::read(reader)?;
let _: u64 = Readable::read(reader)?;
let _: Signature = Readable::read(reader)?;
},
_ => return Err(DecodeError::InvalidValue),
}
let funding_tx_confirmed_in = Readable::read(reader)?;
let funding_tx_confirmation_height = Readable::read(reader)?;
let short_channel_id = Readable::read(reader)?;

let counterparty_dust_limit_satoshis = Readable::read(reader)?;
let holder_dust_limit_satoshis = Readable::read(reader)?;
let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
let mut counterparty_selected_channel_reserve_satoshis = None;
if ver == 1 {
// Read the old serialization from version 0.0.98.
counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
} else {
// Read the 8 bytes of backwards-compatibility data.
let _dummy: u64 = Readable::read(reader)?;
}
let counterparty_htlc_minimum_msat = Readable::read(reader)?;
let holder_htlc_minimum_msat = Readable::read(reader)?;
let counterparty_max_accepted_htlcs = Readable::read(reader)?;

let mut minimum_depth = None;
if ver == 1 {
// Read the old serialization from version 0.0.98.
minimum_depth = Some(Readable::read(reader)?);
} else {
// Read the 4 bytes of backwards-compatibility data.
let _dummy: u32 = Readable::read(reader)?;
}
let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
0 => None,
1 => Some(CounterpartyForwardingInfo {
fee_base_msat: Readable::read(reader)?,
fee_proportional_millionths: Readable::read(reader)?,
cltv_expiry_delta: Readable::read(reader)?,
}),
_ => return Err(DecodeError::InvalidValue),
};
let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
let funding_transaction: Option<Transaction> = Readable::read(reader)?;

let counterparty_cur_commitment_point = Readable::read(reader)?;
let counterparty_prev_commitment_point = Readable::read(reader)?;
let counterparty_node_id = Readable::read(reader)?;

let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
let commitment_secrets = Readable::read(reader)?;

let channel_update_status = Readable::read(reader)?;
#[cfg(any(test, fuzzing))]
let mut historical_inbound_htlc_fulfills = new_hash_set();
#[cfg(any(test, fuzzing))]
{
let htlc_fulfills_len: u64 = Readable::read(reader)?;
for _ in 0..htlc_fulfills_len {
assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
}
}
let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
Some((feerate, if channel_parameters.is_outbound_from_holder {
FeeUpdateState::Outbound
} else {
FeeUpdateState::AwaitingRemoteRevokeToAnnounce
}))
} else {
None
};
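// The serialized form does not carry the `FeeUpdateState` itself, but it is implied: on an
// outbound channel any pending update is one we sent, while for an inbound channel the
// write path above only persists updates in `AwaitingRemoteRevokeToAnnounce`.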
let mut announcement_sigs = None;
let mut target_closing_feerate_sats_per_kw = None;
let mut monitor_pending_finalized_fulfills = Some(Vec::new());
let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
// Prior to supporting channel type negotiation, all of our channels were static_remotekey
// only, so we default to that if none was written.
let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
let mut channel_creation_height = Some(serialized_height);
let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;

// If we read an old Channel, for simplicity we just treat it as "we never sent an
// AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
let mut latest_inbound_scid_alias = None;
let mut outbound_scid_alias = None;
let mut channel_pending_event_emitted = None;
let mut channel_ready_event_emitted = None;

let mut user_id_high_opt: Option<u64> = None;
let mut channel_keys_id: Option<[u8; 32]> = None;
let mut temporary_channel_id: Option<ChannelId> = None;
let mut holder_max_accepted_htlcs: Option<u16> = None;

let mut blocked_monitor_updates = Some(Vec::new());

let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;

let mut is_batch_funding: Option<()> = None;

let mut local_initiated_shutdown: Option<()> = None;

let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;

let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
read_tlv_fields!(reader, {
(0, announcement_sigs, option),
(1, minimum_depth, option),
(2, channel_type, option),
(3, counterparty_selected_channel_reserve_satoshis, option),
(4, holder_selected_channel_reserve_satoshis, option),
(5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
(6, holder_max_htlc_value_in_flight_msat, option),
(7, shutdown_scriptpubkey, option),
(8, blocked_monitor_updates, optional_vec),
(9, target_closing_feerate_sats_per_kw, option),
(11, monitor_pending_finalized_fulfills, optional_vec),
(13, channel_creation_height, option),
(15, preimages_opt, optional_vec),
(17, announcement_sigs_state, option),
(19, latest_inbound_scid_alias, option),
(21, outbound_scid_alias, option),
(23, channel_ready_event_emitted, option),
(25, user_id_high_opt, option),
(27, channel_keys_id, option),
(28, holder_max_accepted_htlcs, option),
(29, temporary_channel_id, option),
(31, channel_pending_event_emitted, option),
(35, pending_outbound_skimmed_fees_opt, optional_vec),
(37, holding_cell_skimmed_fees_opt, optional_vec),
(38, is_batch_funding, option),
(39, pending_outbound_blinding_points_opt, optional_vec),
(41, holding_cell_blinding_points_opt, optional_vec),
(43, malformed_htlcs, optional_vec), // Added in 0.0.119
(45, local_initiated_shutdown, option),
});
let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
// If we've gotten to the funding stage of the channel, populate the signer with its
// required channel parameters.
if channel_state >= ChannelState::FundingNegotiated {
holder_signer.provide_channel_parameters(&channel_parameters);
}
(channel_keys_id, holder_signer)
} else {
// `keys_data` can be `None` if we had corrupted data.
let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
(holder_signer.channel_keys_id(), holder_signer)
};
if let Some(preimages) = preimages_opt {
let mut iter = preimages.into_iter();
for htlc in pending_outbound_htlcs.iter_mut() {
match htlc.state {
OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
},
OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
},
_ => {}
}
}
// We expect all preimages to be consumed above
if iter.next().is_some() {
return Err(DecodeError::InvalidValue);
}
}
let chan_features = channel_type.as_ref().unwrap();
if !chan_features.is_subset(our_supported_features) {
// If the channel was written by a new version and negotiated with features we don't
// understand yet, refuse to read it.
return Err(DecodeError::UnknownRequiredFeature);
}

// ChannelTransactionParameters may have had an empty features set upon deserialization.
// To account for that, we're proactively setting/overriding the field here.
channel_parameters.channel_type_features = chan_features.clone();
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());

// `user_id` used to be a single u64 value. In order to remain backwards
// compatible with versions prior to 0.0.113, the u128 is serialized as two
// separate u64 values.
let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);

let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
let mut iter = skimmed_fees.into_iter();
for htlc in pending_outbound_htlcs.iter_mut() {
htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
}
// We expect all skimmed fees to be consumed above
if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
}
if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
let mut iter = skimmed_fees.into_iter();
for htlc in holding_cell_htlc_updates.iter_mut() {
if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
*skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
}
}
// We expect all skimmed fees to be consumed above
if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
}
if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
let mut iter = blinding_pts.into_iter();
for htlc in pending_outbound_htlcs.iter_mut() {
htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
}
// We expect all blinding points to be consumed above
if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
}
if let Some(blinding_pts) = holding_cell_blinding_points_opt {
let mut iter = blinding_pts.into_iter();
for htlc in holding_cell_htlc_updates.iter_mut() {
if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
*blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
}
}
// We expect all blinding points to be consumed above
if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
}
if let Some(malformed_htlcs) = malformed_htlcs {
for (malformed_htlc_id, failure_code, sha256_of_onion) in malformed_htlcs {
let htlc_idx = holding_cell_htlc_updates.iter().position(|htlc| {
if let HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet } = htlc {
let matches = *htlc_id == malformed_htlc_id;
if matches { debug_assert!(err_packet.data.is_empty()) }
matches
} else { false }
}).ok_or(DecodeError::InvalidValue)?;
let malformed_htlc = HTLCUpdateAwaitingACK::FailMalformedHTLC {
htlc_id: malformed_htlc_id, failure_code, sha256_of_onion
};
let _ = core::mem::replace(&mut holding_cell_htlc_updates[htlc_idx], malformed_htlc);
}
}
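// Replacing the dummy entry in place (rather than removing and re-inserting) preserves the
// relative ordering of the holding cell updates.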
Ok(Channel {
context: ChannelContext {
user_id,

config: config.unwrap(),

prev_config: None,

// Note that we don't care about serializing handshake limits as we only ever serialize
// channel data after the handshake has completed.
inbound_handshake_limits_override: None,

channel_id,
temporary_channel_id,
channel_state,
announcement_sigs_state: announcement_sigs_state.unwrap(),
secp_ctx,
channel_value_satoshis,

latest_monitor_update_id,

holder_signer: ChannelSignerType::Ecdsa(holder_signer),
shutdown_scriptpubkey,
destination_script,

cur_holder_commitment_transaction_number,
cur_counterparty_commitment_transaction_number,
value_to_self_msat,

holder_max_accepted_htlcs,
pending_inbound_htlcs,
pending_outbound_htlcs,
holding_cell_htlc_updates,

resend_order,

monitor_pending_channel_ready,
monitor_pending_revoke_and_ack,
monitor_pending_commitment_signed,
monitor_pending_forwards,
monitor_pending_failures,
monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),

signer_pending_commitment_update: false,
signer_pending_funding: false,

pending_update_fee,
holding_cell_update_fee,
next_holder_htlc_id,
next_counterparty_htlc_id,
update_time_counter,
feerate_per_kw,

#[cfg(debug_assertions)]
holder_max_commitment_tx_output: Mutex::new((0, 0)),
#[cfg(debug_assertions)]
counterparty_max_commitment_tx_output: Mutex::new((0, 0)),

last_sent_closing_fee: None,
pending_counterparty_closing_signed: None,
expecting_peer_commitment_signed: false,
closing_fee_limits: None,
target_closing_feerate_sats_per_kw,

funding_tx_confirmed_in,
funding_tx_confirmation_height,
short_channel_id,
channel_creation_height: channel_creation_height.unwrap(),

counterparty_dust_limit_satoshis,
holder_dust_limit_satoshis,
counterparty_max_htlc_value_in_flight_msat,
holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
counterparty_selected_channel_reserve_satoshis,
holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
counterparty_htlc_minimum_msat,
holder_htlc_minimum_msat,
counterparty_max_accepted_htlcs,
minimum_depth,

counterparty_forwarding_info,

channel_transaction_parameters: channel_parameters,
funding_transaction,
is_batch_funding,

counterparty_cur_commitment_point,
counterparty_prev_commitment_point,
counterparty_node_id,

counterparty_shutdown_scriptpubkey,

commitment_secrets,

channel_update_status,
closing_signed_in_flight: false,

announcement_sigs,

#[cfg(any(test, fuzzing))]
next_local_commitment_tx_fee_info_cached: Mutex::new(None),
#[cfg(any(test, fuzzing))]
next_remote_commitment_tx_fee_info_cached: Mutex::new(None),

workaround_lnd_bug_4006: None,
sent_message_awaiting_response: None,

latest_inbound_scid_alias,
// Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if they're missing
outbound_scid_alias: outbound_scid_alias.unwrap_or(0),

channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),

#[cfg(any(test, fuzzing))]
historical_inbound_htlc_fulfills,

channel_type: channel_type.unwrap(),
channel_keys_id,

local_initiated_shutdown,

blocked_monitor_updates: blocked_monitor_updates.unwrap(),

#[cfg(dual_funding)]
dual_funding_channel_context: None,
},
})
}
}

#[cfg(test)]
mod tests {
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::blockdata::script::{ScriptBuf, Builder};
use bitcoin::blockdata::transaction::{Transaction, TxOut};
use bitcoin::blockdata::opcodes;
use bitcoin::network::constants::Network;
use crate::ln::onion_utils::INVALID_ONION_BLINDING;
use crate::ln::{PaymentHash, PaymentPreimage};
use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
use crate::ln::channel::InitFeatures;
use crate::ln::channel::{AwaitingChannelReadyFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
use crate::ln::msgs;
use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
use crate::ln::script::ShutdownScript;
use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
use crate::chain::transaction::OutPoint;
use crate::routing::router::{Path, RouteHop};
use crate::util::config::UserConfig;
use crate::util::errors::APIError;
use crate::util::ser::{ReadableArgs, Writeable};
use crate::util::test_utils;
use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
use bitcoin::secp256k1::ffi::Signature as FFISignature;
use bitcoin::secp256k1::{SecretKey,PublicKey};
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::Hash;
use bitcoin::hashes::hex::FromHex;
use bitcoin::hash_types::WPubkeyHash;
use bitcoin::blockdata::locktime::absolute::LockTime;
use bitcoin::address::{WitnessProgram, WitnessVersion};
use crate::prelude::*;
#[test]
fn test_channel_state_order() {
use crate::ln::channel::NegotiatingFundingFlags;
use crate::ln::channel::AwaitingChannelReadyFlags;
use crate::ln::channel::ChannelReadyFlags;

assert!(ChannelState::NegotiatingFunding(NegotiatingFundingFlags::new()) < ChannelState::FundingNegotiated);
assert!(ChannelState::FundingNegotiated < ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()));
assert!(ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()) < ChannelState::ChannelReady(ChannelReadyFlags::new()));
assert!(ChannelState::ChannelReady(ChannelReadyFlags::new()) < ChannelState::ShutdownComplete);
}
struct TestFeeEstimator {
fee_est: u32
}
impl FeeEstimator for TestFeeEstimator {
fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
self.fee_est
}
}

#[test]
fn test_max_funding_satoshis_no_wumbo() {
assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
"MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
}
struct Keys {
signer: InMemorySigner,
}

impl EntropySource for Keys {
fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
}

impl SignerProvider for Keys {
type EcdsaSigner = InMemorySigner;
#[cfg(taproot)]
type TaprootSigner = InMemorySigner;

fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
self.signer.channel_keys_id()
}

fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
self.signer.clone()
}

fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }

fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
let secp_ctx = Secp256k1::signing_only();
let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
}

fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
let secp_ctx = Secp256k1::signing_only();
let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
}
}

#[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
}
#[test]
fn upfront_shutdown_script_incompatibility() {
let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
&WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
).unwrap();

let seed = [42; 32];
let network = Network::Testnet;
let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
keys_provider.expect(OnGetShutdownScriptpubkey {
returns: non_v0_segwit_shutdown_script.clone(),
});

let secp_ctx = Secp256k1::new();
let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
Err(APIError::IncompatibleShutdownScript { script }) => {
assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
},
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(_) => panic!("Expected error"),
}
}
// Check that, during channel creation, we use the same feerate in the open channel message
// as we do in the Channel object creation itself.
#[test]
fn test_open_channel_msg_fee() {
let original_fee = 253;
let mut fee_est = TestFeeEstimator{fee_est: original_fee };
let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
let secp_ctx = Secp256k1::new();
let seed = [42; 32];
let network = Network::Testnet;
let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

// Now change the fee so we can check that the fee in the open_channel message is the
// same as the old fee.
fee_est.fee_est = 500;
let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
assert_eq!(open_channel_msg.common_fields.commitment_feerate_sat_per_1000_weight, original_fee);
}
#[test]
fn test_holder_vs_counterparty_dust_limit() {
// Test that when calculating the local and remote commitment transaction fees, the correct
// dust limits are used.
let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
let secp_ctx = Secp256k1::new();
let seed = [42; 32];
let network = Network::Testnet;
let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
let logger = test_utils::TestLogger::new();
let best_block = BestBlock::from_network(network);

// Go through the flow of opening a channel between two nodes, making sure
// they have different dust limits.

// Create Node A's channel pointing to Node B's pubkey
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

// Create Node B's channel by receiving Node A's open_channel message.
// Make sure A's dust limit is as we expect.
let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

// Node B --> Node A: accept channel, explicitly setting B's dust limit.
let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
accept_channel_msg.common_fields.dust_limit_satoshis = 546;
node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
node_a_chan.context.holder_dust_limit_satoshis = 1560;

// Node A --> Node B: funding created
let output_script = node_a_chan.context.get_funding_redeemscript();
let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
value: 10000000, script_pubkey: output_script.clone(),
}]};
let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

// Node B --> Node A: funding signed
let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };

// Put some inbound and outbound HTLCs in A's channel.
let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
htlc_id: 0,
amount_msat: htlc_amount_msat,
payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
cltv_expiry: 300000000,
state: InboundHTLCState::Committed,
});

node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
htlc_id: 1,
amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
cltv_expiry: 200000000,
state: OutboundHTLCState::Committed,
source: HTLCSource::OutboundRoute {
path: Path { hops: Vec::new(), blinded_tail: None },
session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
first_hop_htlc_msat: 548,
payment_id: PaymentId([42; 32]),
},
skimmed_fee_msat: None,
blinding_point: None,
});

// Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
// the dust limit check.
let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);

// Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
// of the HTLCs are seen to be above the dust limit.
node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
}
#[test]
fn test_timeout_vs_success_htlc_dust_limit() {
// Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
// calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
// *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
// `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
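// Recall that an offered (outbound) HTLC is dust if its value, minus the fee of the
// HTLC-timeout transaction that would claim it, falls below the dust limit, while a
// received (inbound) HTLC uses the HTLC-success transaction weight instead; the amounts
// below are chosen to sit one satoshi on either side of those thresholds.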
let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
let secp_ctx = Secp256k1::new();
let seed = [42; 32];
let network = Network::Testnet;
let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());

// If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
// counted as dust when it shouldn't be.
let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);

// If swapped: this HTLC would be counted as non-dust when it shouldn't be.
let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);

chan.context.channel_transaction_parameters.is_outbound_from_holder = false;

// If swapped: this HTLC would be counted as non-dust when it shouldn't be.
let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);

// If swapped: this HTLC would be counted as dust when it shouldn't be.
let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
}
#[test]
fn channel_reestablish_no_updates() {
let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
let logger = test_utils::TestLogger::new();
let secp_ctx = Secp256k1::new();
let seed = [42; 32];
let network = Network::Testnet;
let best_block = BestBlock::from_network(network);
let chain_hash = ChainHash::using_genesis_block(network);
let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

// Go through the flow of opening a channel between two nodes.

// Create Node A's channel pointing to Node B's pubkey
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

// Create Node B's channel by receiving Node A's open_channel message
let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

// Node B --> Node A: accept channel
let accept_channel_msg = node_b_chan.accept_inbound_channel();
node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();

// Node A --> Node B: funding created
let output_script = node_a_chan.context.get_funding_redeemscript();
let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
value: 10000000, script_pubkey: output_script.clone(),
}]};
let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

// Node B --> Node A: funding signed
let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };

// Now disconnect the two nodes and check that the commitment point in
// Node B's channel_reestablish message is sane.
assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
let msg = node_b_chan.get_channel_reestablish(&&logger);
assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);

// Check that the commitment point in Node A's channel_reestablish message
// is sane.
assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
let msg = node_a_chan.get_channel_reestablish(&&logger);
assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
}
9537 fn test_configured_holder_max_htlc_value_in_flight() {
9538 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9539 let logger = test_utils::TestLogger::new();
9540 let secp_ctx = Secp256k1::new();
9541 let seed = [42; 32];
9542 let network = Network::Testnet;
9543 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9544 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9545 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9547 let mut config_2_percent = UserConfig::default();
9548 config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
9549 let mut config_99_percent = UserConfig::default();
9550 config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
9551 let mut config_0_percent = UserConfig::default();
9552 config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
9553 let mut config_101_percent = UserConfig::default();
9554 config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
9556 // Test that `OutboundV1Channel::new` creates a channel with the correct value for
9557 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
9558 // which is set to the lower bound + 1 (2%) of the `channel_value`.
9559 let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
9560 let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
9561 assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
9563 // Test with the upper bound - 1 of valid values (99%).
9564 let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
9565 let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
9566 assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
9568 let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
9570 // Test that `InboundV1Channel::new` creates a channel with the correct value for
9571 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
9572 // which is set to the lower bound - 1 (2%) of the `channel_value`.
9573 let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9574 let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
9575 assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
9577 // Test with the upper bound - 1 of valid values (99%).
9578 let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9579 let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
9580 assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
9582 // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
9583 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
9584 let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
9585 let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
9586 assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
9588 // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
9589 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
9591 let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
9592 let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
9593 assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
9595 // Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
9596 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
9597 let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9598 let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
9599 assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
9601 // Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
9602 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
9604 let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9605 let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
9606 assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
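// Note: the cases above all appear to exercise a single clamping rule: the configured
// `max_inbound_htlc_value_in_flight_percent_of_channel` is limited to the [1, 100] range
// before being applied to the channel value, e.g. a configured 0% is bumped up to 1% and
// a configured 101% is capped at 100%.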
}

#[test]
fn test_configured_holder_selected_channel_reserve_satoshis() {
9612 // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
9613 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
9614 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
// Test with valid but unreasonably high channel reserves:
// the requesting and accepting parties ask for 49%-49% and 60%-30% channel reserves.
9618 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
9619 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
// Test with a calculated channel reserve below the lower bound,
// i.e., `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
9623 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
// Test with invalid channel reserves, since the sum of both is greater than or equal
// to the channel value.
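// Each side must be able to afford the other's reserve on top of its own, so when the two
// configured reserves sum to 100% or more of the channel value there is no valid split and
// (as asserted in the helper below) the inbound side rejects the open.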
9627 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
9628 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
}

fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
9632 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
9633 let logger = test_utils::TestLogger::new();
9634 let secp_ctx = Secp256k1::new();
9635 let seed = [42; 32];
9636 let network = Network::Testnet;
9637 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9638 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9639 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9642 let mut outbound_node_config = UserConfig::default();
9643 outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
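// `their_channel_reserve_proportional_millionths` is expressed in parts-per-million, so
// e.g. an `outbound_selected_channel_reserve_perc` of 0.02 (2%) becomes 20_000 millionths here.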
9644 let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();
9646 let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
9647 assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
9649 let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
9650 let mut inbound_node_config = UserConfig::default();
9651 inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
9653 if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
9654 let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();
9656 let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
9658 assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
9659 assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
} else {
// Channel negotiation fails here: the two reserves sum to at least the full channel value.
9662 let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
9663 assert!(result.is_err());
}
}

#[test]
fn channel_update() {
9669 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9670 let logger = test_utils::TestLogger::new();
9671 let secp_ctx = Secp256k1::new();
9672 let seed = [42; 32];
9673 let network = Network::Testnet;
9674 let best_block = BestBlock::from_network(network);
9675 let chain_hash = ChainHash::using_genesis_block(network);
9676 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9678 // Create Node A's channel pointing to Node B's pubkey
9679 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9680 let config = UserConfig::default();
9681 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9683 // Create Node B's channel by receiving Node A's open_channel message
9684 // Make sure A's dust limit is as we expect.
9685 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9686 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9687 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
9689 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
9690 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
9691 accept_channel_msg.common_fields.dust_limit_satoshis = 546;
9692 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
9693 node_a_chan.context.holder_dust_limit_satoshis = 1560;
9695 // Node A --> Node B: funding created
9696 let output_script = node_a_chan.context.get_funding_redeemscript();
9697 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
9698 value: 10000000, script_pubkey: output_script.clone(),
}]};
let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9701 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
9702 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
9704 // Node B --> Node A: funding signed
9705 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
9706 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
9708 // Make sure that receiving a channel update will update the Channel as expected.
let update = ChannelUpdate {
	contents: UnsignedChannelUpdate {
		chain_hash,
		short_channel_id: 0,
		timestamp: 0,
		flags: 0,
		cltv_expiry_delta: 100,
		htlc_minimum_msat: 5,
		htlc_maximum_msat: MAX_VALUE_MSAT,
		fee_base_msat: 110,
		fee_proportional_millionths: 11,
		excess_data: Vec::new(),
	},
	signature: Signature::from(unsafe { FFISignature::new() })
};
assert!(node_a_chan.channel_update(&update).unwrap());
9726 // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
9727 // change our official htlc_minimum_msat.
9728 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
match node_a_chan.context.counterparty_forwarding_info() {
	Some(info) => {
		assert_eq!(info.cltv_expiry_delta, 100);
		assert_eq!(info.fee_base_msat, 110);
		assert_eq!(info.fee_proportional_millionths, 11);
	},
	None => panic!("expected counterparty forwarding info to be Some")
}
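// Re-applying the identical update should be a no-op; `channel_update` signals this by
// returning `false` (no forwarding info changed), which the next assert relies on.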
9738 assert!(!node_a_chan.channel_update(&update).unwrap());
}

#[test]
fn blinding_point_skimmed_fee_malformed_ser() {
// Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized
// properly.
9745 let logger = test_utils::TestLogger::new();
9746 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9747 let secp_ctx = Secp256k1::new();
9748 let seed = [42; 32];
9749 let network = Network::Testnet;
9750 let best_block = BestBlock::from_network(network);
9751 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9753 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9754 let config = UserConfig::default();
9755 let features = channelmanager::provided_init_features(&config);
9756 let mut outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(
9757 &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None
).unwrap();
let inbound_chan = InboundV1Channel::<&TestKeysInterface>::new(
9760 &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config),
9761 &features, &outbound_chan.get_open_channel(ChainHash::using_genesis_block(network)), 7, &config, 0, &&logger, false
).unwrap();
outbound_chan.accept_channel(&inbound_chan.get_accept_channel_message(), &config.channel_handshake_limits, &features).unwrap();
9764 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
9765 value: 10000000, script_pubkey: outbound_chan.context.get_funding_redeemscript(),
}]};
let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9768 let funding_created = outbound_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap().unwrap();
9769 let mut chan = match inbound_chan.funding_created(&funding_created, best_block, &&keys_provider, &&logger) {
9770 Ok((chan, _, _)) => chan,
9771 Err((_, e)) => panic!("{}", e),
};

let dummy_htlc_source = HTLCSource::OutboundRoute {
	path: Path {
		hops: vec![RouteHop {
			pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
			node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
			cltv_expiry_delta: 0, maybe_announced_channel: false,
		}],
		blinded_tail: None
	},
	session_priv: test_utils::privkey(42),
	first_hop_htlc_msat: 0,
	payment_id: PaymentId([42; 32]),
};
let dummy_outbound_output = OutboundHTLCOutput {
	htlc_id: 0,
	amount_msat: 0,
	payment_hash: PaymentHash([43; 32]),
	cltv_expiry: 0,
	state: OutboundHTLCState::Committed,
	source: dummy_htlc_source.clone(),
	skimmed_fee_msat: None,
	blinding_point: None,
};
9797 let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
	if idx % 2 == 0 {
		htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
	}
	if idx % 3 == 0 {
		htlc.skimmed_fee_msat = Some(1);
	}
}
9806 chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();
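// The modulo patterns above intentionally leave some HTLCs with and some without blinding
// points and skimmed fees, so both the present and absent cases of these optional fields
// are exercised by the serialization round-trip below.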
let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
	amount_msat: 0,
	cltv_expiry: 0,
	payment_hash: PaymentHash([43; 32]),
	source: dummy_htlc_source.clone(),
	onion_routing_packet: msgs::OnionPacket {
		version: 0,
		public_key: Ok(test_utils::pubkey(1)),
		hop_data: [0; 20*65],
		hmac: [0; 32],
	},
	skimmed_fee_msat: None,
	blinding_point: None,
};
let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
	payment_preimage: PaymentPreimage([42; 32]),
	htlc_id: 0,
};
let dummy_holding_cell_failed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailHTLC {
	htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }
};
let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC {
	htlc_id, failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32],
};
let mut holding_cell_htlc_updates = Vec::with_capacity(12);
for i in 0..12 {
	if i % 5 == 0 {
		holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
	} else if i % 5 == 1 {
		holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
	} else if i % 5 == 2 {
		let mut dummy_add = dummy_holding_cell_add_htlc.clone();
		if let HTLCUpdateAwaitingACK::AddHTLC {
			ref mut blinding_point, ref mut skimmed_fee_msat, ..
		} = &mut dummy_add {
			*blinding_point = Some(test_utils::pubkey(42 + i));
			*skimmed_fee_msat = Some(42);
		} else { panic!() }
		holding_cell_htlc_updates.push(dummy_add);
	} else if i % 5 == 3 {
		holding_cell_htlc_updates.push(dummy_holding_cell_malformed_htlc(i as u64));
	} else {
		holding_cell_htlc_updates.push(dummy_holding_cell_failed_htlc(i as u64));
	}
}
9853 chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();
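// Similarly, the `i % 5` rotation above fills the holding cell with a mix of adds (with and
// without blinding points/skimmed fees), claims, fails, and malformed-fails.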
9855 // Encode and decode the channel and ensure that the HTLCs within are the same.
9856 let encoded_chan = chan.encode();
9857 let mut s = crate::io::Cursor::new(&encoded_chan);
9858 let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
9859 let features = channelmanager::provided_channel_type_features(&config);
9860 let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
9861 assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
9862 assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
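// If (de)serialization dropped or reordered any of the optional fields set above, one of
// these equality checks would fail.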
}

#[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
#[test]
9867 fn outbound_commitment_test() {
9868 use bitcoin::sighash;
9869 use bitcoin::consensus::encode::serialize;
9870 use bitcoin::sighash::EcdsaSighashType;
9871 use bitcoin::hashes::hex::FromHex;
9872 use bitcoin::hash_types::Txid;
9873 use bitcoin::secp256k1::Message;
9874 use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
9875 use crate::ln::PaymentPreimage;
9876 use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
9877 use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
9878 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
9879 use crate::util::logger::Logger;
9880 use crate::sync::Arc;
9881 use core::str::FromStr;
9882 use hex::DisplayHex;
9884 // Test vectors from BOLT 3 Appendices C and F (anchors):
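// Each `test_commitment!` / `test_commitment_with_anchors!` invocation below checks one
// vector: the counterparty's commitment signature, our holder signature, the fully signed
// commitment transaction hex, and, where present, each HTLC transaction with its signatures.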
9885 let feeest = TestFeeEstimator{fee_est: 15000};
9886 let logger : Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());
9887 let secp_ctx = Secp256k1::new();
let mut signer = InMemorySigner::new(
	&secp_ctx,
	SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
	SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
	SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
	SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
	SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
	// These aren't set in the test vectors:
	[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
	10_000_000,
	[0; 32],
	[0; 32],
);
9904 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
9905 <Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
9906 let keys_provider = Keys { signer: signer.clone() };
9908 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9909 let mut config = UserConfig::default();
9910 config.channel_handshake_config.announced_channel = false;
9911 let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
9912 chan.context.holder_dust_limit_satoshis = 546;
9913 chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
9915 let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
9917 let counterparty_pubkeys = ChannelPublicKeys {
9918 funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
9919 revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
9920 payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
9921 delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
9922 htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
};

chan.context.channel_transaction_parameters.counterparty_parameters = Some(
9925 CounterpartyChannelTransactionParameters {
9926 pubkeys: counterparty_pubkeys.clone(),
9927 selected_contest_delay: 144
	});
chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
9930 signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
9932 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
9933 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
9935 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
9936 <Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
9938 assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
9939 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
9941 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
9942 // derived from a commitment_seed, so instead we copy it here and call
9943 // build_commitment_transaction.
9944 let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
9945 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
9946 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
9947 let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
9948 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
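// Note: `derive_new` combines our delayed-payment and HTLC basepoints with the
// counterparty's revocation and HTLC basepoints, each tweaked by `per_commitment_point`
// (a high-level summary of the derivation, not its exact details).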
9950 macro_rules! test_commitment {
9951 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
9952 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9953 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
	};
}

macro_rules! test_commitment_with_anchors {
9958 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
9959 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9960 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
	};
}

macro_rules! test_commitment_common {
9965 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
9966 $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
} ) => { {
let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
9969 let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
9971 let htlcs = commitment_stats.htlcs_included.drain(..)
9972 .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
		.collect();
	(commitment_stats.tx, htlcs)
};
let trusted_tx = commitment_tx.trust();
9977 let unsigned_tx = trusted_tx.built_transaction();
9978 let redeemscript = chan.context.get_funding_redeemscript();
9979 let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
9980 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
9981 log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
9982 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
9984 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
9985 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
9986 let mut counterparty_htlc_sigs = Vec::new();
9987 counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
$({
	let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
	per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
	counterparty_htlc_sigs.push(remote_signature);
})*
assert_eq!(htlcs.len(), per_htlc.len());
9995 let holder_commitment_tx = HolderCommitmentTransaction::new(
9996 commitment_tx.clone(),
9997 counterparty_signature,
9998 counterparty_htlc_sigs,
9999 &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
10000 chan.context.counterparty_funding_pubkey()
);
let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
10003 assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
10005 let funding_redeemscript = chan.context.get_funding_redeemscript();
10006 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
10007 assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");
10009 // ((htlc, counterparty_sig), (index, holder_sig))
10010 let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();
$({
	log_trace!(logger, "verifying htlc {}", $htlc_idx);
10014 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
10016 let ref htlc = htlcs[$htlc_idx];
10017 let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
10018 chan.context.get_counterparty_selected_contest_delay().unwrap(),
10019 &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
10020 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
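// With anchors, HTLC signatures commit to SIGHASH_SINGLE | SIGHASH_ANYONECANPAY so the
// HTLC transaction can later be fee-bumped by attaching extra inputs and outputs;
// pre-anchor channels sign with SIGHASH_ALL.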
10021 let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
10022 let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
10023 assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");
let mut preimage: Option<PaymentPreimage> = None;
if !htlc.offered {
	for i in 0..5 {
		let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
		if out == htlc.payment_hash {
			preimage = Some(PaymentPreimage([i; 32]));
		}
	}

	assert!(preimage.is_some());
}
10037 let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
10038 let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
10039 channel_derivation_parameters: ChannelDerivationParameters {
10040 value_satoshis: chan.context.channel_value_satoshis,
10041 keys_id: chan.context.channel_keys_id,
		transaction_parameters: chan.context.channel_transaction_parameters.clone(),
	},
	commitment_txid: trusted_tx.txid(),
10045 per_commitment_number: trusted_tx.commitment_number(),
10046 per_commitment_point: trusted_tx.per_commitment_point(),
10047 feerate_per_kw: trusted_tx.feerate_per_kw(),
10048 htlc: htlc.clone(),
10049 preimage: preimage.clone(),
10050 counterparty_sig: *htlc_counterparty_sig,
10051 }, &secp_ctx).unwrap();
10052 let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
10053 assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
10055 let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
10056 assert_eq!(signature, htlc_holder_sig, "htlc sig");
10057 let trusted_tx = holder_commitment_tx.trust();
10058 htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
10059 log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
10060 assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
})*
assert!(htlc_counterparty_sig_iter.next().is_none());
} }
}
10066 // anchors: simple commitment tx with no HTLCs and single anchor
10067 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
10068 "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
10069 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10071 // simple commitment tx with no HTLCs
10072 chan.context.value_to_self_msat = 7000000000;
10074 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
10075 "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
10076 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10078 // anchors: simple commitment tx with no HTLCs
10079 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
10080 "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
10081 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
chan.context.pending_inbound_htlcs.push({
	let mut out = InboundHTLCOutput{
		htlc_id: 0,
		amount_msat: 1000000,
		cltv_expiry: 500,
		payment_hash: PaymentHash([0; 32]),
		state: InboundHTLCState::Committed,
	};
	out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
	out
});
chan.context.pending_inbound_htlcs.push({
	let mut out = InboundHTLCOutput{
		htlc_id: 1,
		amount_msat: 2000000,
		cltv_expiry: 501,
		payment_hash: PaymentHash([0; 32]),
		state: InboundHTLCState::Committed,
	};
	out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
	out
});
chan.context.pending_outbound_htlcs.push({
	let mut out = OutboundHTLCOutput{
		htlc_id: 2,
		amount_msat: 2000000,
		cltv_expiry: 502,
		payment_hash: PaymentHash([0; 32]),
		state: OutboundHTLCState::Committed,
		source: HTLCSource::dummy(),
		skimmed_fee_msat: None,
		blinding_point: None,
	};
	out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
	out
});
chan.context.pending_outbound_htlcs.push({
	let mut out = OutboundHTLCOutput{
		htlc_id: 3,
		amount_msat: 3000000,
		cltv_expiry: 503,
		payment_hash: PaymentHash([0; 32]),
		state: OutboundHTLCState::Committed,
		source: HTLCSource::dummy(),
		skimmed_fee_msat: None,
		blinding_point: None,
	};
	out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
	out
});
chan.context.pending_inbound_htlcs.push({
	let mut out = InboundHTLCOutput{
		htlc_id: 4,
		amount_msat: 4000000,
		cltv_expiry: 504,
		payment_hash: PaymentHash([0; 32]),
		state: InboundHTLCState::Committed,
	};
	out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
	out
});
10145 // commitment tx with all five HTLCs untrimmed (minimum feerate)
10146 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10147 chan.context.feerate_per_kw = 0;
10149 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
10150 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
10151 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10154 "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
10155 "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
10156 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
10159 "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
10160 "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
10161 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10164 "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
10165 "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
10166 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
10169 "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
10170 "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
10171 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10174 "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
10175 "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
10176 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// commitment tx with seven outputs untrimmed (maximum feerate)
10180 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10181 chan.context.feerate_per_kw = 647;
10183 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
10184 "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
10185 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10188 "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
10189 "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
10190 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
10193 "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
10194 "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
10195 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10198 "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
10199 "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
10200 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
10203 "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
10204 "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
10205 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10208 "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
10209 "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
10210 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// commitment tx with six outputs untrimmed (minimum feerate)
10214 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10215 chan.context.feerate_per_kw = 648;
10217 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
10218 "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
10219 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10222 "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
10223 "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
10224 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10227 "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
10228 "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
10229 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
10232 "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
10233 "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
10234 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10237 "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
10238 "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
10239 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );

// anchors: commitment tx with six outputs untrimmed (minimum dust limit)
10243 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10244 chan.context.feerate_per_kw = 645;
10245 chan.context.holder_dust_limit_satoshis = 1001;
10247 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
10248 "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
10249 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10252 "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
10253 "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
10254 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
10257 "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
10258 "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
10259 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
10262 "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
10263 "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
10264 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
10267 "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
10268 "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
10269 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }

		// commitment tx with six outputs untrimmed (maximum feerate)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 2069;
		chan.context.holder_dust_limit_satoshis = 546;

		test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
			"3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
			"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

			{ 0,
			"3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
			"3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
			"02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

			{ 1,
			"3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
			"3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
			"02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

			{ 2,
			"3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
			"3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
			"02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

			{ 3,
			"304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
			"3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
			"02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
		} );

		// commitment tx with five outputs untrimmed (minimum feerate)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 2070;
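		// Per BOLT 3, a received HTLC stays on the commitment tx only while its amount, in
		// satoshis, is at least dust_limit plus the HTLC-success fee, i.e.
		// floor(feerate_per_kw * 703 / 1000) on non-anchor channels. At 2069 sat/kw that fee is
		// 1454 sat, so the 2000 sat received HTLC sits exactly at the 546 + 1454 = 2000 threshold
		// and survives; at 2070 sat/kw the fee becomes 1455 and 546 + 1455 = 2001 > 2000 trims it,
		// which is why the vectors drop from six outputs to five here.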

		test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
			"3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
			"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

			{ 0,
			"304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
			"30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
			"02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

			{ 1,
			"3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
			"30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
			"02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

			{ 2,
			"3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
			"30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
			"02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
		} );

		// commitment tx with five outputs untrimmed (maximum feerate)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 2194;

		test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
			"3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
			"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

			{ 0,
			"3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
			"304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
			"02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

			{ 1,
			"3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
			"3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
			"02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

			{ 2,
			"3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
			"30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
			"02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
		} );

		// commitment tx with four outputs untrimmed (minimum feerate)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 2195;

		test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
			"3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
			"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

			{ 0,
			"3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
			"3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
			"020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

			{ 1,
			"3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
			"3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
			"020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
		} );

		// anchors: commitment tx with four outputs untrimmed (minimum dust limit)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 2185;
		chan.context.holder_dust_limit_satoshis = 2001;
		let cached_channel_type = chan.context.channel_type.clone();
		chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
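		// With `option_anchors_zero_fee_htlc_tx`, second-stage HTLC transactions pay zero fee, so
		// trimming reduces to a plain dust comparison against the HTLC amount itself. At a 2001 sat
		// dust limit both 2000 sat HTLCs are trimmed, leaving the 3000 and 4000 sat HTLCs alongside
		// the two balance outputs.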

		test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
			"3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
			"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

			{ 0,
			"304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
			"30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
			"02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },

			{ 1,
			"3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
			"3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
			"02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
		} );

		// commitment tx with four outputs untrimmed (maximum feerate)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 3702;
		chan.context.holder_dust_limit_satoshis = 546;
		chan.context.channel_type = cached_channel_type.clone();

		test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
			"3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
			"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

			{ 0,
			"304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
			"304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
			"020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

			{ 1,
			"3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
			"304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
			"020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
		} );

		// commitment tx with three outputs untrimmed (minimum feerate)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 3703;

		test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
			"304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
			"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

			{ 0,
			"3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
			"3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
			"0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
		} );

		// anchors: commitment tx with three outputs untrimmed (minimum dust limit)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 3687;
		chan.context.holder_dust_limit_satoshis = 3001;
		chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();

		test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
			"3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
			"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

			{ 0,
			"3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
			"3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
			"02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
		} );

		// commitment tx with three outputs untrimmed (maximum feerate)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 4914;
		chan.context.holder_dust_limit_satoshis = 546;
		chan.context.channel_type = cached_channel_type.clone();

		test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
			"3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
			"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

			{ 0,
			"3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
			"30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
			"02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
		} );

		// commitment tx with two outputs untrimmed (minimum feerate)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 4915;
		chan.context.holder_dust_limit_satoshis = 546;
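		// The 4914/4915 boundary belongs to the received 4000 sat HTLC: its success fee,
		// floor(feerate_per_kw * 703 / 1000), is 3454 sat at 4914 (546 + 3454 = 4000, so the HTLC
		// survives exactly at its amount, with a second-stage output of exactly 546 sat, as the
		// vector above shows), but 3455 sat at 4915, putting the threshold at 4001 > 4000 and
		// trimming the last HTLC away.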

		test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
			"30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
			"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

		// anchors: commitment tx with two outputs untrimmed (minimum dust limit)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 4894;
		chan.context.holder_dust_limit_satoshis = 4001;
		chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();

		test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
			"30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
			"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

		// commitment tx with two outputs untrimmed (maximum feerate)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 9651180;
		chan.context.holder_dust_limit_satoshis = 546;
		chan.context.channel_type = cached_channel_type.clone();

		test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
			"3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
			"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

		// commitment tx with one output untrimmed (minimum feerate)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 9651181;

		test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
			"304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
			"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

		// anchors: commitment tx with one output untrimmed (minimum dust limit)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 6216010;
		chan.context.holder_dust_limit_satoshis = 4001;
		chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();

		test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
			"30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
			"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

		// commitment tx with fee greater than funder amount
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 9651936;
		chan.context.holder_dust_limit_satoshis = 546;
		chan.context.channel_type = cached_channel_type;

		test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
			"304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
			"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

		// commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
		chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
		chan.context.feerate_per_kw = 253;
		chan.context.pending_inbound_htlcs.clear();
		chan.context.pending_inbound_htlcs.push({
			let mut out = InboundHTLCOutput{
				htlc_id: 1,
				amount_msat: 2000000,
				cltv_expiry: 501,
				payment_hash: PaymentHash([0; 32]),
				state: InboundHTLCState::Committed,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_outbound_htlcs.clear();
		chan.context.pending_outbound_htlcs.push({
			let mut out = OutboundHTLCOutput{
				htlc_id: 6,
				amount_msat: 5000001,
				cltv_expiry: 506,
				payment_hash: PaymentHash([0; 32]),
				state: OutboundHTLCState::Committed,
				source: HTLCSource::dummy(),
				skimmed_fee_msat: None,
				blinding_point: None,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_outbound_htlcs.push({
			let mut out = OutboundHTLCOutput{
				htlc_id: 5,
				amount_msat: 5000000,
				cltv_expiry: 505,
				payment_hash: PaymentHash([0; 32]),
				state: OutboundHTLCState::Committed,
				source: HTLCSource::dummy(),
				skimmed_fee_msat: None,
				blinding_point: None,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
			out
		});
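		// The two offered HTLCs deliberately trim to the same amount (5000001 and 5000000 msat
		// both round down to 5000 sat) and share a payment hash, so their commitment outputs are
		// byte-identical. Per BOLT 3's output ordering, such ties are broken by cltv_expiry,
		// which is exactly what this vector exercises.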

		test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
			"304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
			"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

			{ 0,
			"3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
			"3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
			"020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
			{ 1,
			"3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
			"3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
			"020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
			{ 2,
			"30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
			"304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
			"020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
		} );

		chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
		test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
			"3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
			"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

			{ 0,
			"30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
			"304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
			"020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
			{ 1,
			"304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
			"304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
			"020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
			{ 2,
			"3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
			"3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
			"020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
		} );
	}

	#[test]
	fn test_per_commitment_secret_gen() {
		// Test vectors from BOLT 3 Appendix D:

		let mut seed = [0; 32];
		seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
			<Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);

		seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
			<Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);

		assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
			<Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);

		assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
			<Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);

		seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
			<Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
	}
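
	// A minimal re-derivation of the BOLT 3 Appendix D "generate_from_seed" algorithm the
	// vectors above exercise, assuming only the `Sha256` import already in scope. The canonical
	// implementation is `chan_utils::build_commitment_secret`; the helper name here is ours.
	#[allow(dead_code)]
	fn build_commitment_secret_sketch(commitment_seed: &[u8; 32], idx: u64) -> [u8; 32] {
		let mut res: [u8; 32] = *commitment_seed;
		for i in 0..48 {
			let bitpos = 47 - i;
			if idx & (1 << bitpos) == (1 << bitpos) {
				// For each set bit of the 48-bit index, from most to least significant,
				// flip that bit in the running value and replace the value with its SHA256.
				res[bitpos / 8] ^= 1 << (bitpos & 7);
				res = Sha256::hash(&res).to_byte_array();
			}
		}
		res
	}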

	#[test]
	fn test_key_derivation() {
		// Test vectors from BOLT 3 Appendix E:
		let secp_ctx = Secp256k1::new();

		let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
		let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();

		let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
		assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);

		let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
		assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);

		assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());

		assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
			<Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);

		assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
	}
	}

	#[test]
	fn test_zero_conf_channel_type_support() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let logger = test_utils::TestLogger::new();

		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
			node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

		let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
		channel_type_features.set_zero_conf_required();

		let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.common_fields.channel_type = Some(channel_type_features);
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
			node_b_node_id, &channelmanager::provided_channel_type_features(&config),
			&channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
		assert!(res.is_ok());
	}

	#[test]
	fn test_supports_anchors_zero_htlc_tx_fee() {
		// Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
		// resulting `channel_type`.
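		// Note (our summary of BOLT 2 `option_channel_type` negotiation): the opener proposes a
		// `channel_type` in `open_channel` and the acceptor must either echo it back in
		// `accept_channel` or fail the channel, so it suffices to check the type each side stores.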
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let mut config = UserConfig::default();
		config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;

		// It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`,
		// both sides need to signal it.
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
			&config, 0, 42, None
		).unwrap();
		assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());

		let mut expected_channel_type = ChannelTypeFeatures::empty();
		expected_channel_type.set_static_remote_key_required();
		expected_channel_type.set_anchors_zero_fee_htlc_tx_required();

		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		).unwrap();

		assert_eq!(channel_a.context.channel_type, expected_channel_type);
		assert_eq!(channel_b.context.channel_type, expected_channel_type);
	}

	#[test]
	fn test_rejects_implicit_simple_anchors() {
		// Tests that if `option_anchors` is being negotiated implicitly through the intersection of
		// each side's `InitFeatures`, it is rejected.
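		// Note (our reading of the pre-`option_channel_type` rules): when `open_channel` carries
		// no explicit `channel_type`, the type is inferred from the intersection of the two
		// peers' `InitFeatures`, which is how `option_anchors` can be negotiated implicitly.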
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let config = UserConfig::default();

		// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
		let static_remote_key_required: u64 = 1 << 12;
		let simple_anchors_required: u64 = 1 << 20;
		let raw_init_features = static_remote_key_required | simple_anchors_required;
		let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
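		// Per BOLT 9, bit 12 is `option_static_remotekey` and bit 20 is the original
		// `option_anchors`; using the even (required) bits marks both features as compulsory.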
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		// Set `channel_type` to `None` to force the implicit feature negotiation.
		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.common_fields.channel_type = None;

		// Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
		// `static_remote_key`, it will fail the channel.
		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		);
		assert!(channel_b.is_err());
	}

	#[test]
	fn test_rejects_simple_anchors_channel_type() {
		// Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
		// it is rejected.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let config = UserConfig::default();

		// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
		let static_remote_key_required: u64 = 1 << 12;
		let simple_anchors_required: u64 = 1 << 20;
		let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
		let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
		let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
		assert!(!simple_anchors_init.requires_unknown_bits());
		assert!(!simple_anchors_channel_type.requires_unknown_bits());
		// First, we'll try to open a channel between A and B where A requests a channel type for
		// the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
		// B as it's not supported by LDK.
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.common_fields.channel_type = Some(simple_anchors_channel_type.clone());

		let res = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		);
		assert!(res.is_err());

		// Then, we'll try to open another channel where A requests a channel type for
		// `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
		// original `option_anchors` feature, which should be rejected by A as it's not supported by
		// LDK.
		let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
			10000000, 100000, 42, &config, 0, 42, None
		).unwrap();

		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));

		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		).unwrap();

		let mut accept_channel_msg = channel_b.get_accept_channel_message();
		accept_channel_msg.common_fields.channel_type = Some(simple_anchors_channel_type.clone());

		let res = channel_a.accept_channel(
			&accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
		);
		assert!(res.is_err());
	}

	#[test]
	fn test_waiting_for_batch() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = ChainHash::using_genesis_block(network);
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let mut config = UserConfig::default();
		// Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
		// channel in a batch before all channels are ready.
		config.channel_handshake_limits.trust_own_funding_0conf = true;
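		// Note: a batch funding transaction opens several channels with a single transaction, so
		// neither the transaction nor our channel_ready may be released (even for a trusted 0conf
		// channel) until every channel in the batch has collected its counterparty's signature.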
		// Create a channel from node a to node b that will be part of batch funding.
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
			&feeest,
			&&keys_provider,
			&&keys_provider,
			node_b_node_id,
			&channelmanager::provided_init_features(&config),
			10000000,
			100000,
			42,
			&config,
			0,
			42,
			None
		).unwrap();

		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
			&feeest,
			&&keys_provider,
			&&keys_provider,
			node_b_node_id,
			&channelmanager::provided_channel_type_features(&config),
			&channelmanager::provided_init_features(&config),
			&open_channel_msg,
			7,
			&config,
			0,
			&&logger,
			true,  // Allow node b to send a 0conf channel_ready.
		).unwrap();

		let accept_channel_msg = node_b_chan.accept_inbound_channel();
		node_a_chan.accept_channel(
			&accept_channel_msg,
			&config.channel_handshake_limits,
			&channelmanager::provided_init_features(&config),
		).unwrap();

		// Fund the channel with a batch funding transaction.
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction {
			version: 2,
			lock_time: LockTime::ZERO,
			input: Vec::new(),
			output: vec![
				TxOut {
					value: 10000000, script_pubkey: output_script.clone(),
				},
				TxOut {
					value: 10000000, script_pubkey: Builder::new().into_script(),
				},
			]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let funding_created_msg = node_a_chan.get_funding_created(
			tx.clone(), funding_outpoint, true, &&logger,
		).map_err(|_| ()).unwrap();
		let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
			&funding_created_msg.unwrap(),
			best_block,
			&&keys_provider,
			&&logger,
		).map_err(|_| ()).unwrap();
		let node_b_updates = node_b_chan.monitor_updating_restored(
			&&logger,
			&&keys_provider,
			chain_hash,
			&config,
			0,
		);

		// Receive funding_signed, but the channel will be configured to hold sending channel_ready and
		// broadcasting the funding transaction until the batch is ready.
		let res = node_a_chan.funding_signed(
			&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger,
		);
		let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
		let node_a_updates = node_a_chan.monitor_updating_restored(
			&&logger,
			&&keys_provider,
			chain_hash,
			&config,
			0,
		);
		// Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
		// as the funding transaction depends on all channels in the batch becoming ready.
		assert!(node_a_updates.channel_ready.is_none());
		assert!(node_a_updates.funding_broadcastable.is_none());
		assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));

		// It is possible to receive a 0conf channel_ready from the remote node.
		node_a_chan.channel_ready(
			&node_b_updates.channel_ready.unwrap(),
			&&keys_provider,
			chain_hash,
			&config,
			&best_block,
			&&logger,
		).unwrap();
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH | AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)
		);

		// ChannelState::WaitingForBatch is cleared only once ChannelManager calls set_batch_ready.
		node_a_chan.set_batch_ready();
		assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
		assert!(node_a_chan.check_get_channel_ready(0).is_some());
	}
}