// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::sighash::EcdsaSighashType;
use bitcoin::consensus::encode;

use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256d;
use bitcoin::hash_types::{Txid, BlockHash};

use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
use bitcoin::secp256k1::{PublicKey, SecretKey};
use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
use bitcoin::secp256k1;
use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
use crate::ln::msgs;
use crate::ln::msgs::DecodeError;
use crate::ln::script::{self, ShutdownScript};
use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
use crate::ln::chan_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
use crate::events::ClosureReason;
use crate::routing::gossip::NodeId;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use crate::util::logger::{Logger, Record, WithContext};
use crate::util::errors::APIError;
use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
use crate::util::scid_utils::scid_from_parts;

use crate::prelude::*;
use core::{cmp, mem, fmt};
use core::convert::TryInto;

#[cfg(any(test, fuzzing, debug_assertions))]
use crate::sync::Mutex;
use crate::sign::type_resolver::ChannelSignerType;

use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};
pub struct ChannelValueStat {
	pub value_to_self_msat: u64,
	pub channel_value_msat: u64,
	pub channel_reserve_msat: u64,
	pub pending_outbound_htlcs_amount_msat: u64,
	pub pending_inbound_htlcs_amount_msat: u64,
	pub holding_cell_outbound_amount_msat: u64,
	pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
	pub counterparty_dust_limit_msat: u64,
}

pub struct AvailableBalances {
	/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
	pub balance_msat: u64,
	/// Total amount available for our counterparty to send to us.
	pub inbound_capacity_msat: u64,
	/// Total amount available for us to send to our counterparty.
	pub outbound_capacity_msat: u64,
	/// The maximum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_limit_msat: u64,
	/// The minimum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_minimum_msat: u64,
}
#[derive(Debug, Clone, Copy, PartialEq)]
enum FeeUpdateState {
	// Inbound states mirroring InboundHTLCState
	RemoteAnnounced,
	AwaitingRemoteRevokeToAnnounce,
	// Note that we do not have an `AwaitingAnnouncedRemoteRevoke` variant here as it is universally
	// handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
	// distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
	// the fee update anywhere, we can simply consider the fee update `Committed` immediately
	// instead of setting it to `AwaitingAnnouncedRemoteRevoke`.

	// Outbound state can only be `LocalAnnounced` or `Committed`
	Outbound,
}

enum InboundHTLCRemovalReason {
	FailRelay(msgs::OnionErrorPacket),
	FailMalformed(([u8; 32], u16)),
	Fulfill(PaymentPreimage),
}
enum InboundHTLCState {
	/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
	/// update_add_htlc message for this HTLC.
	RemoteAnnounced(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've
	/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
	/// state (see the example below). We have not yet included this HTLC in a
	/// commitment_signed message because we are waiting on the remote's
	/// aforementioned state revocation. One reason this missing remote RAA
	/// (revoke_and_ack) blocks us from constructing a commitment_signed message
	/// is because every time we create a new "state", i.e. every time we sign a
	/// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
	/// which are provided one-at-a-time in each RAA. E.g., the last RAA they
	/// sent provided the per_commitment_point for our current commitment tx.
	/// The other reason we should not send a commitment_signed without their RAA
	/// is because their RAA serves to ACK our previous commitment_signed.
	///
	/// Here's an example of how an HTLC could come to be in this state:
	/// remote --> update_add_htlc(prev_htlc) --> local
	/// remote --> commitment_signed(prev_htlc) --> local
	/// remote <-- revoke_and_ack <-- local
	/// remote <-- commitment_signed(prev_htlc) <-- local
	/// [note that here, the remote does not respond with a RAA]
	/// remote --> update_add_htlc(this_htlc) --> local
	/// remote --> commitment_signed(prev_htlc, this_htlc) --> local
	/// Now `this_htlc` will be assigned this state. It's unable to be officially
	/// accepted, i.e. included in a commitment_signed, because we're missing the
	/// RAA that provides our next per_commitment_point. The per_commitment_point
	/// is used to derive commitment keys, which are used to construct the
	/// signatures in a commitment_signed message.
	/// Implies AwaitingRemoteRevoke.
	///
	/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
	AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
	/// We have also included this HTLC in our latest commitment_signed and are now just waiting
	/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
	/// channel (before it can then get forwarded and/or removed).
	/// Implies AwaitingRemoteRevoke.
	AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
	Committed,
	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we'll drop it.
	/// Note that we have to keep an eye on the HTLC until we've received a broadcastable
	/// commitment transaction without it as otherwise we'll have to force-close the channel to
	/// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
	/// anyway). That said, ChannelMonitor does this for us (see
	/// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
	/// our own local state before then, once we're sure that the next commitment_signed and
	/// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
	LocalRemoved(InboundHTLCRemovalReason),
}
/// Exposes the state of pending inbound HTLCs.
///
/// At a high level, an HTLC being forwarded from one Lightning node to another Lightning node goes
/// through the following states in the state machine:
/// - Announced for addition by the originating node through the update_add_htlc message.
/// - Added to the commitment transaction of the receiving node and originating node in turn
///   through the exchange of commitment_signed and revoke_and_ack messages.
/// - Announced for resolution (fulfillment or failure) by the receiving node through either one of
///   the update_fulfill_htlc, update_fail_htlc, and update_fail_malformed_htlc messages.
/// - Removed from the commitment transaction of the originating node and receiving node in turn
///   through the exchange of commitment_signed and revoke_and_ack messages.
///
/// This can be used to inspect what next message an HTLC is waiting for to advance its state.
#[derive(Clone, Debug, PartialEq)]
pub enum InboundHTLCStateDetails {
	/// We have added this HTLC in our commitment transaction by receiving commitment_signed and
	/// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
	/// before this HTLC is included on the remote commitment transaction.
	AwaitingRemoteRevokeToAdd,
	/// This HTLC has been included in the commitment_signed and revoke_and_ack messages on both sides
	/// and is included in both commitment transactions.
	///
	/// This HTLC is now safe to either forward or be claimed as a payment by us. The HTLC will
	/// remain in this state until the forwarded upstream HTLC has been resolved and we resolve this
	/// HTLC correspondingly, or until we claim it as a payment. If it is part of a multipart
	/// payment, it will only be claimed together with other required parts.
	Committed,
	/// We have received the preimage for this HTLC and it is being removed by fulfilling it with
	/// update_fulfill_htlc. This HTLC is still on both commitment transactions, but we are awaiting
	/// the appropriate revoke_and_ack's from the remote before this HTLC is removed from the remote
	/// commitment transaction after update_fulfill_htlc.
	AwaitingRemoteRevokeToRemoveFulfill,
	/// The HTLC is being removed by failing it with update_fail_htlc or update_fail_malformed_htlc.
	/// This HTLC is still on both commitment transactions, but we are awaiting the appropriate
	/// revoke_and_ack's from the remote before this HTLC is removed from the remote commitment
	/// transaction.
	AwaitingRemoteRevokeToRemoveFail,
}
impl From<&InboundHTLCState> for Option<InboundHTLCStateDetails> {
	fn from(state: &InboundHTLCState) -> Option<InboundHTLCStateDetails> {
		match state {
			InboundHTLCState::RemoteAnnounced(_) => None,
			InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
			InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
			InboundHTLCState::Committed =>
				Some(InboundHTLCStateDetails::Committed),
			InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(_)) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail),
			InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(_)) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail),
			InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill),
		}
	}
}

impl_writeable_tlv_based_enum_upgradable!(InboundHTLCStateDetails,
	(0, AwaitingRemoteRevokeToAdd) => {},
	(2, Committed) => {},
	(4, AwaitingRemoteRevokeToRemoveFulfill) => {},
	(6, AwaitingRemoteRevokeToRemoveFail) => {};
);
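// Illustrative sketch (not part of the original source): `InboundHTLCStateDetails` directly
// encodes which message, if any, an inbound HTLC is still waiting on, e.g.
//
//     let waiting_on = match details {
//         InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd
//         | InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill
//         | InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail => Some("revoke_and_ack"),
//         InboundHTLCStateDetails::Committed => None,
//     };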
struct InboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: InboundHTLCState,
}

/// Exposes details around pending inbound HTLCs.
#[derive(Clone, Debug, PartialEq)]
pub struct InboundHTLCDetails {
	/// The HTLC ID.
	/// The IDs are incremented by 1 starting from 0 for each offered HTLC.
	/// They are unique per channel and inbound/outbound direction, unless an HTLC was only announced
	/// and not part of any commitment transaction.
	pub htlc_id: u64,
	/// The amount in msat.
	pub amount_msat: u64,
	/// The block height at which this HTLC expires.
	pub cltv_expiry: u32,
	/// The payment hash.
	pub payment_hash: PaymentHash,
	/// The state of the HTLC in the state machine.
	///
	/// Determines on which commitment transactions the HTLC is included and what message the HTLC is
	/// waiting for to advance to the next state.
	///
	/// See [`InboundHTLCStateDetails`] for information on the specific states.
	///
	/// LDK will always fill this field in, but when downgrading to prior versions of LDK, new
	/// states may result in `None` here.
	pub state: Option<InboundHTLCStateDetails>,
	/// Whether the HTLC has an output below the local dust limit. If so, the output will be trimmed
	/// from the local commitment transaction and added to the commitment transaction fee.
	/// For non-anchor channels, this takes into account the cost of the second-stage HTLC
	/// transactions as well.
	///
	/// When the local commitment transaction is broadcasted as part of a unilateral closure,
	/// the value of this HTLC will therefore not be claimable but instead burned as a transaction
	/// fee.
	///
	/// Note that dust limits are specific to each party. An HTLC can be dust for the local
	/// commitment transaction but not for the counterparty's commitment transaction and vice versa.
	pub is_dust: bool,
}

impl_writeable_tlv_based!(InboundHTLCDetails, {
	(0, htlc_id, required),
	(2, amount_msat, required),
	(4, cltv_expiry, required),
	(6, payment_hash, required),
	(7, state, upgradable_option),
	(8, is_dust, required),
});
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum OutboundHTLCState {
	/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we will promote to Committed (note that they may not accept it until the next time we
	/// revoke, but we don't really care about that:
	/// * they've revoked, so worst case we can announce an old state and get our (option on)
	///   money back (though we won't), and,
	/// * we'll send them a revoke when they send a commitment_signed, and since only they're
	///   allowed to remove it, the "can only be removed once committed on both sides" requirement
	///   doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
	///   we'll never get out of sync).
	/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
	/// OutboundHTLCOutput's size just for a temporary bit
	LocalAnnounced(Box<msgs::OnionPacket>),
	Committed,
	/// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
	/// the change (though they'll need to revoke before we fail the payment).
	RemoteRemoved(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
	/// remote revoke_and_ack on a previous state before we can do so.
	AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
	/// revoke_and_ack to drop completely.
	AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
}
/// Exposes the state of pending outbound HTLCs.
///
/// At a high level, an HTLC being forwarded from one Lightning node to another Lightning node goes
/// through the following states in the state machine:
/// - Announced for addition by the originating node through the update_add_htlc message.
/// - Added to the commitment transaction of the receiving node and originating node in turn
///   through the exchange of commitment_signed and revoke_and_ack messages.
/// - Announced for resolution (fulfillment or failure) by the receiving node through either one of
///   the update_fulfill_htlc, update_fail_htlc, and update_fail_malformed_htlc messages.
/// - Removed from the commitment transaction of the originating node and receiving node in turn
///   through the exchange of commitment_signed and revoke_and_ack messages.
///
/// This can be used to inspect what next message an HTLC is waiting for to advance its state.
#[derive(Clone, Debug, PartialEq)]
pub enum OutboundHTLCStateDetails {
	/// We are awaiting the appropriate revoke_and_ack's from the remote before the HTLC is added
	/// on the remote's commitment transaction after update_add_htlc.
	AwaitingRemoteRevokeToAdd,
	/// The HTLC has been added to the remote's commitment transaction by sending commitment_signed
	/// and receiving revoke_and_ack in return.
	///
	/// The HTLC will remain in this state until the remote node resolves the HTLC, or until we
	/// unilaterally close the channel due to a timeout with an uncooperative remote node.
	Committed,
	/// The HTLC has been fulfilled successfully by the remote with a preimage in update_fulfill_htlc,
	/// and we removed the HTLC from our commitment transaction by receiving commitment_signed and
	/// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
	/// for the removal from its commitment transaction.
	AwaitingRemoteRevokeToRemoveSuccess,
	/// The HTLC has been failed by the remote with update_fail_htlc or update_fail_malformed_htlc,
	/// and we removed the HTLC from our commitment transaction by receiving commitment_signed and
	/// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
	/// for the removal from its commitment transaction.
	AwaitingRemoteRevokeToRemoveFailure,
}

impl From<&OutboundHTLCState> for OutboundHTLCStateDetails {
	fn from(state: &OutboundHTLCState) -> OutboundHTLCStateDetails {
		match state {
			OutboundHTLCState::LocalAnnounced(_) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToAdd,
			OutboundHTLCState::Committed =>
				OutboundHTLCStateDetails::Committed,
			// RemoteRemoved states are ignored as the state is transient and the remote has not committed to
			// it yet.
			OutboundHTLCState::RemoteRemoved(_) =>
				OutboundHTLCStateDetails::Committed,
			OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess,
			OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Failure(_)) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFailure,
			OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess,
			OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Failure(_)) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFailure,
		}
	}
}

impl_writeable_tlv_based_enum_upgradable!(OutboundHTLCStateDetails,
	(0, AwaitingRemoteRevokeToAdd) => {},
	(2, Committed) => {},
	(4, AwaitingRemoteRevokeToRemoveSuccess) => {},
	(6, AwaitingRemoteRevokeToRemoveFailure) => {};
);
#[cfg_attr(test, derive(Debug, PartialEq))]
enum OutboundHTLCOutcome {
	/// LDK version 0.0.105+ will always fill in the preimage here.
	Success(Option<PaymentPreimage>),
	Failure(HTLCFailReason),
}

impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
	fn from(o: Option<HTLCFailReason>) -> Self {
		match o {
			None => OutboundHTLCOutcome::Success(None),
			Some(r) => OutboundHTLCOutcome::Failure(r)
		}
	}
}

impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
	fn into(self) -> Option<&'a HTLCFailReason> {
		match self {
			OutboundHTLCOutcome::Success(_) => None,
			OutboundHTLCOutcome::Failure(ref r) => Some(r)
		}
	}
}

#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
struct OutboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: OutboundHTLCState,
	source: HTLCSource,
	blinding_point: Option<PublicKey>,
	skimmed_fee_msat: Option<u64>,
}

/// Exposes details around pending outbound HTLCs.
#[derive(Clone, Debug, PartialEq)]
pub struct OutboundHTLCDetails {
	/// The HTLC ID.
	/// The IDs are incremented by 1 starting from 0 for each offered HTLC.
	/// They are unique per channel and inbound/outbound direction, unless an HTLC was only announced
	/// and not part of any commitment transaction.
	///
	/// Not present when we are awaiting a remote revocation and the HTLC is not added yet.
	pub htlc_id: Option<u64>,
	/// The amount in msat.
	pub amount_msat: u64,
	/// The block height at which this HTLC expires.
	pub cltv_expiry: u32,
	/// The payment hash.
	pub payment_hash: PaymentHash,
	/// The state of the HTLC in the state machine.
	///
	/// Determines on which commitment transactions the HTLC is included and what message the HTLC is
	/// waiting for to advance to the next state.
	///
	/// See [`OutboundHTLCStateDetails`] for information on the specific states.
	///
	/// LDK will always fill this field in, but when downgrading to prior versions of LDK, new
	/// states may result in `None` here.
	pub state: Option<OutboundHTLCStateDetails>,
	/// The extra fee being skimmed off the top of this HTLC.
	pub skimmed_fee_msat: Option<u64>,
	/// Whether the HTLC has an output below the local dust limit. If so, the output will be trimmed
	/// from the local commitment transaction and added to the commitment transaction fee.
	/// For non-anchor channels, this takes into account the cost of the second-stage HTLC
	/// transactions as well.
	///
	/// When the local commitment transaction is broadcasted as part of a unilateral closure,
	/// the value of this HTLC will therefore not be claimable but instead burned as a transaction
	/// fee.
	///
	/// Note that dust limits are specific to each party. An HTLC can be dust for the local
	/// commitment transaction but not for the counterparty's commitment transaction and vice versa.
	pub is_dust: bool,
}

impl_writeable_tlv_based!(OutboundHTLCDetails, {
	(0, htlc_id, required),
	(2, amount_msat, required),
	(4, cltv_expiry, required),
	(6, payment_hash, required),
	(7, state, upgradable_option),
	(8, skimmed_fee_msat, required),
	(10, is_dust, required),
});
/// See AwaitingRemoteRevoke ChannelState for more info
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum HTLCUpdateAwaitingACK {
	AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
		payment_hash: PaymentHash,
		onion_routing_packet: msgs::OnionPacket,
		// The extra fee we're skimming off the top of this HTLC.
		skimmed_fee_msat: Option<u64>,
		blinding_point: Option<PublicKey>,
		payment_preimage: PaymentPreimage,
		err_packet: msgs::OnionErrorPacket,
		sha256_of_onion: [u8; 32],

macro_rules! define_state_flags {
	($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr, $get: ident, $set: ident, $clear: ident)),+], $extra_flags: expr) => {
		#[doc = $flag_type_doc]
		#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
		struct $flag_type(u32);

			const $flag: $flag_type = $flag_type($value);

			/// All flags that apply to the specified [`ChannelState`] variant.
			const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags);

			fn new() -> Self { Self(0) }

			fn from_u32(flags: u32) -> Result<Self, ()> {
				if flags & !Self::ALL.0 != 0 {
					Ok($flag_type(flags))

			fn is_empty(&self) -> bool { self.0 == 0 }
			fn is_set(&self, flag: Self) -> bool { *self & flag == flag }
			fn set(&mut self, flag: Self) { *self |= flag }
			fn clear(&mut self, flag: Self) -> Self { self.0 &= !flag.0; *self }

		define_state_flags!($flag_type, Self::$flag, $get, $set, $clear);

		impl core::ops::BitOr for $flag_type {
			fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
		impl core::ops::BitOrAssign for $flag_type {
			fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; }
		impl core::ops::BitAnd for $flag_type {
			fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) }
		impl core::ops::BitAndAssign for $flag_type {
			fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; }
	($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
	($flag_type: ident, $flag: expr, $get: ident, $set: ident, $clear: ident) => {
		fn $get(&self) -> bool { self.is_set($flag_type::new() | $flag) }
		fn $set(&mut self) { self.set($flag_type::new() | $flag) }
		fn $clear(&mut self) -> Self { self.clear($flag_type::new() | $flag) }
	($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);

		define_state_flags!($flag_type, FundedStateFlags::PEER_DISCONNECTED,
			is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected);
		define_state_flags!($flag_type, FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS,
			is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress);
		define_state_flags!($flag_type, FundedStateFlags::REMOTE_SHUTDOWN_SENT,
			is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent);
		define_state_flags!($flag_type, FundedStateFlags::LOCAL_SHUTDOWN_SENT,
			is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent);

		impl core::ops::BitOr<FundedStateFlags> for $flag_type {
			fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
		impl core::ops::BitOrAssign<FundedStateFlags> for $flag_type {
			fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; }
		impl core::ops::BitAnd<FundedStateFlags> for $flag_type {
			fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) }
		impl core::ops::BitAndAssign<FundedStateFlags> for $flag_type {
			fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; }
		impl PartialEq<FundedStateFlags> for $flag_type {
			fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 }
		impl From<FundedStateFlags> for $flag_type {
			fn from(flags: FundedStateFlags) -> Self { Self(flags.0) }
/// We declare all the states/flags here together to help determine which bits are still available
mod state_flags {
	pub const OUR_INIT_SENT: u32 = 1 << 0;
	pub const THEIR_INIT_SENT: u32 = 1 << 1;
	pub const FUNDING_NEGOTIATED: u32 = 1 << 2;
	pub const AWAITING_CHANNEL_READY: u32 = 1 << 3;
	pub const THEIR_CHANNEL_READY: u32 = 1 << 4;
	pub const OUR_CHANNEL_READY: u32 = 1 << 5;
	pub const CHANNEL_READY: u32 = 1 << 6;
	pub const PEER_DISCONNECTED: u32 = 1 << 7;
	pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8;
	pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9;
	pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10;
	pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11;
	pub const SHUTDOWN_COMPLETE: u32 = 1 << 12;
	pub const WAITING_FOR_BATCH: u32 = 1 << 13;
}
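// Rough illustration of the bit layout above (not part of the original source): a channel in
// `AwaitingChannelReady` whose counterparty has already sent `channel_ready` would encode its
// state as
//
//     AWAITING_CHANNEL_READY | THEIR_CHANNEL_READY = (1 << 3) | (1 << 4) = 0b1_1000 = 24
//
// while `OUR_INIT_SENT`/`THEIR_INIT_SENT` only ever appear in the `NegotiatingFunding` state.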
define_state_flags!(
	"Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
	FundedStateFlags, [
		("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
			until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED,
			is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected),
		("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
			somewhere and we should pause sending any outbound messages until they've managed to \
			complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS,
			is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress),
		("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
			any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
			message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT,
			is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent),
		("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
			the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT,
			is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent)
	]
);

define_state_flags!(
	"Flags that only apply to [`ChannelState::NegotiatingFunding`].",
	NegotiatingFundingFlags, [
		("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
			OUR_INIT_SENT, state_flags::OUR_INIT_SENT, is_our_init_sent, set_our_init_sent, clear_our_init_sent),
		("Indicates we have received their `open_channel`/`accept_channel` message.",
			THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT, is_their_init_sent, set_their_init_sent, clear_their_init_sent)
	]
);

define_state_flags!(
	"Flags that only apply to [`ChannelState::AwaitingChannelReady`].",
	FUNDED_STATE, AwaitingChannelReadyFlags, [
		("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
			`OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
			THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY,
			is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready),
		("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
			`OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
			OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY,
			is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready),
		("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
			is being held until all channels in the batch have received `funding_signed` and have \
			their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH,
			is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch)
	]
);

define_state_flags!(
	"Flags that only apply to [`ChannelState::ChannelReady`].",
	FUNDED_STATE, ChannelReadyFlags, [
		("Indicates that we have sent a `commitment_signed` but are awaiting the responding \
			`revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
			messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
			implicit ACK, so instead we have to hold them away temporarily to be sent later.",
			AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE,
			is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke)
	]
);
// Note that the order of this enum is implicitly defined by where each variant is placed. Take this
// into account when introducing new states and update `test_channel_state_order` accordingly.
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
enum ChannelState {
	/// We are negotiating the parameters required for the channel prior to funding it.
	NegotiatingFunding(NegotiatingFundingFlags),
	/// We have sent `funding_created` and are awaiting a `funding_signed` to advance to
	/// `AwaitingChannelReady`. Note that this is nonsense for an inbound channel as we immediately generate
	/// `funding_signed` upon receipt of `funding_created`, so simply skip this state.
	FundingNegotiated,
	/// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the
	/// funding transaction to confirm.
	AwaitingChannelReady(AwaitingChannelReadyFlags),
	/// Both we and our counterparty consider the funding transaction confirmed and the channel is
	/// fully operational.
	ChannelReady(ChannelReadyFlags),
	/// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager`
	/// is about to drop us, but we store this anyway.
	ShutdownComplete,
}
macro_rules! impl_state_flag {
	($get: ident, $set: ident, $clear: ident, [$($state: ident),+]) => {
		fn $get(&self) -> bool {
			ChannelState::$state(flags) => flags.$get(),

			ChannelState::$state(flags) => flags.$set(),
			_ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),

		fn $clear(&mut self) {
			ChannelState::$state(flags) => { let _ = flags.$clear(); },
			_ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
	};
	($get: ident, $set: ident, $clear: ident, FUNDED_STATES) => {
		impl_state_flag!($get, $set, $clear, [AwaitingChannelReady, ChannelReady]);
	};
	($get: ident, $set: ident, $clear: ident, $state: ident) => {
		impl_state_flag!($get, $set, $clear, [$state]);
	};
}

impl ChannelState {
	fn from_u32(state: u32) -> Result<Self, ()> {
		match state {
			state_flags::FUNDING_NEGOTIATED => Ok(ChannelState::FundingNegotiated),
			state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete),
			val => {
				if val & state_flags::AWAITING_CHANNEL_READY == state_flags::AWAITING_CHANNEL_READY {
					AwaitingChannelReadyFlags::from_u32(val & !state_flags::AWAITING_CHANNEL_READY)
						.map(|flags| ChannelState::AwaitingChannelReady(flags))
				} else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY {
					ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY)
						.map(|flags| ChannelState::ChannelReady(flags))
				} else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) {
					Ok(ChannelState::NegotiatingFunding(flags))

	fn to_u32(&self) -> u32 {
		match self {
			ChannelState::NegotiatingFunding(flags) => flags.0,
			ChannelState::FundingNegotiated => state_flags::FUNDING_NEGOTIATED,
			ChannelState::AwaitingChannelReady(flags) => state_flags::AWAITING_CHANNEL_READY | flags.0,
			ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0,
			ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE,
		}
	}

	fn is_pre_funded_state(&self) -> bool {
		matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingNegotiated)
	}

	fn is_both_sides_shutdown(&self) -> bool {
		self.is_local_shutdown_sent() && self.is_remote_shutdown_sent()
	}

	fn with_funded_state_flags_mask(&self) -> FundedStateFlags {
		match self {
			ChannelState::AwaitingChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			_ => FundedStateFlags::new(),
		}
	}

	fn can_generate_new_commitment(&self) -> bool {
		match self {
			ChannelState::ChannelReady(flags) =>
				!flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) &&
					!flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) &&
					!flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
			_ => {
				debug_assert!(false, "Can only generate new commitment within ChannelReady");
				false
			},
		}
	}

	impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected, FUNDED_STATES);
	impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress, FUNDED_STATES);
	impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent, FUNDED_STATES);
	impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent, FUNDED_STATES);
	impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready, AwaitingChannelReady);
	impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready, AwaitingChannelReady);
	impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch, AwaitingChannelReady);
	impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke, ChannelReady);
}
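// A small usage sketch (illustrative, not part of the original source): the helpers generated
// above let callers query or update per-state flags without matching on `ChannelState`
// directly, e.g.
//
//     if channel_state.is_monitor_update_in_progress() {
//         // hold outbound messages until the pending ChannelMonitor update completes
//     }
//     channel_state.set_peer_disconnected();
//
// Setting or clearing a flag on a variant that does not carry it only trips a debug_assert
// (see `impl_state_flag!` above) rather than panicking in release builds.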
pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

pub const DEFAULT_MAX_HTLCS: u16 = 50;

pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
	const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
	const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
	if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
}

#[cfg(not(test))]
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
#[cfg(test)]
pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;

pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
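// Worked example (illustrative, not part of the original source): the commitment transaction fee
// paid by the funder is `feerate_per_kw * weight / 1000`, where the weight is the base weight
// above plus `COMMITMENT_TX_WEIGHT_PER_HTLC` per non-dust HTLC. For a non-anchor channel at
// 2500 sat/kW with 3 non-dust HTLCs:
//
//     weight = 724 + 3 * 172 = 1240 WU
//     fee    = 2500 * 1240 / 1000 = 3100 sats
//
// A hypothetical helper mirroring that arithmetic:
//
//     fn sketch_commit_tx_fee_sat(feerate_per_kw: u32, nondust_htlcs: u64, base_weight: u64) -> u64 {
//         feerate_per_kw as u64 * (base_weight + nondust_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
//     }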
/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;

/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;
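// For reference: (1 << 24) - 1 = 16_777_215 sats, i.e. roughly 0.168 BTC, the pre-wumbo
// per-channel funding cap from BOLT #2.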
/// Total bitcoin supply in satoshis.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;

/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;

/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;

/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
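// Worked derivation (illustrative): 354 sats is Bitcoin Core's dust threshold for the largest
// standard segwit output. Such an output serializes to 8 (value) + 1 (script length) + 42
// (witness program script) = 51 bytes, dust is computed over that plus an assumed ~67 vbytes to
// later spend the output, and the default dust relay feerate is 3 sat/vB:
//
//     (51 + 67) * 3 = 354 sats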
// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;

/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
pub(super) enum ChannelError {
	Ignore(String),
	Warn(String),
	Close(String),
}

impl fmt::Debug for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
			&ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
			&ChannelError::Close(ref e) => write!(f, "Close : {}", e),
		}
	}
}

impl fmt::Display for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "{}", e),
			&ChannelError::Warn(ref e) => write!(f, "{}", e),
			&ChannelError::Close(ref e) => write!(f, "{}", e),
		}
	}
}

pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
	pub logger: &'a L,
	pub peer_id: Option<PublicKey>,
	pub channel_id: Option<ChannelId>,
}

impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
	fn log(&self, mut record: Record) {
		record.peer_id = self.peer_id;
		record.channel_id = self.channel_id;
		self.logger.log(record)
	}
}

impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
where L::Target: Logger {
	pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
	where S::Target: SignerProvider
	{
		WithChannelContext {
			logger,
			peer_id: Some(context.counterparty_node_id),
			channel_id: Some(context.channel_id),
		}
	}
}

macro_rules! secp_check {
	($res: expr, $err: expr) => {
		match $res {
			Ok(thing) => thing,
			Err(_) => return Err(ChannelError::Close($err)),
		}
	};
}
/// The "channel disabled" bit in channel_update must be set based on whether we are connected to
/// our counterparty or not. However, we don't want to announce updates right away to avoid
/// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
/// our channel_update message and track the current state here.
/// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
#[derive(Clone, Copy, PartialEq)]
pub(super) enum ChannelUpdateStatus {
	/// We've announced the channel as enabled and are connected to our peer.
	Enabled,
	/// Our channel is no longer live, but we haven't announced the channel as disabled yet.
	DisabledStaged(u8),
	/// Our channel is live again, but we haven't announced the channel as enabled yet.
	EnabledStaged(u8),
	/// We've announced the channel as disabled.
	Disabled,
}

/// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
pub enum AnnouncementSigsState {
	/// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
	/// we sent the last `AnnouncementSignatures`.
	NotSent,
	/// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
	/// This state never appears on disk - instead we write `NotSent`.
	MessageSent,
	/// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
	/// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
	/// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
	/// they send back a `RevokeAndACK`.
	/// This state never appears on disk - instead we write `NotSent`.
	Committed,
	/// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
	/// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
	PeerReceived,
}
/// An enum indicating whether the local or remote side offered a given HTLC.
enum HTLCInitiator {
	LocalOffered,
	RemoteOffered,
}

/// A struct gathering stats on pending HTLCs, either inbound or outbound side.
struct HTLCStats {
	pending_htlcs: u32,
	pending_htlcs_value_msat: u64,
	on_counterparty_tx_dust_exposure_msat: u64,
	on_holder_tx_dust_exposure_msat: u64,
	holding_cell_msat: u64,
	on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
}

/// A struct gathering stats on a commitment transaction, either local or remote.
struct CommitmentStats<'a> {
	tx: CommitmentTransaction, // the transaction info
	feerate_per_kw: u32, // the feerate included to build the transaction
	total_fee_sat: u64, // the total fee included in the transaction
	num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
	htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
	local_balance_msat: u64, // local balance before fees *not* considering dust limits
	remote_balance_msat: u64, // remote balance before fees *not* considering dust limits
	outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
	inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
}

/// Used when calculating whether we or the remote can afford an additional HTLC.
struct HTLCCandidate {
	amount_msat: u64,
	origin: HTLCInitiator,
}

impl HTLCCandidate {
	fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
		Self {
			amount_msat,
			origin,
		}
	}
}
/// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
/// description
enum UpdateFulfillFetch {
	NewClaim {
		monitor_update: ChannelMonitorUpdate,
		htlc_value_msat: u64,
		msg: Option<msgs::UpdateFulfillHTLC>,
	},
	DuplicateClaim {},
}

/// The return type of get_update_fulfill_htlc_and_commit.
pub enum UpdateFulfillCommitFetch {
	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
	/// previously placed in the holding cell (and has since been removed).
	NewClaim {
		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
		monitor_update: ChannelMonitorUpdate,
		/// The value of the HTLC which was claimed, in msat.
		htlc_value_msat: u64,
	},
	/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
	/// or has been forgotten (presumably previously claimed).
	DuplicateClaim {},
}
/// The return value of `monitor_updating_restored`
pub(super) struct MonitorRestoreUpdates {
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
	pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	pub finalized_claimed_htlcs: Vec<HTLCSource>,
	pub funding_broadcastable: Option<Transaction>,
	pub channel_ready: Option<msgs::ChannelReady>,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
}

/// The return value of `signer_maybe_unblocked`
pub(super) struct SignerResumeUpdates {
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub funding_signed: Option<msgs::FundingSigned>,
	pub channel_ready: Option<msgs::ChannelReady>,
}

/// The return value of `channel_reestablish`
pub(super) struct ReestablishResponses {
	pub channel_ready: Option<msgs::ChannelReady>,
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
	pub shutdown_msg: Option<msgs::Shutdown>,
}

/// The result of a shutdown that should be handled.
pub(crate) struct ShutdownResult {
	pub(crate) closure_reason: ClosureReason,
	/// A channel monitor update to apply.
	pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelId, ChannelMonitorUpdate)>,
	/// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
	pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	/// An unbroadcasted batch funding transaction id. The closure of this channel should be
	/// propagated to the remainder of the batch.
	pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
	pub(crate) channel_id: ChannelId,
	pub(crate) user_channel_id: u128,
	pub(crate) channel_capacity_satoshis: u64,
	pub(crate) counterparty_node_id: PublicKey,
	pub(crate) unbroadcasted_funding_tx: Option<Transaction>,
	pub(crate) channel_funding_txo: Option<OutPoint>,
}
/// If the majority of the channel's funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
/// balance but the fundee is unable to send a payment as the increase in fee more than drains
/// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
/// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
/// by this multiple without hitting this case, before sending.
/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
/// HTLCs for days we may need this to suffice for feerate increases across days, but that may
/// leave the channel less usable as we hold a bigger reserve.
#[cfg(any(fuzzing, test))]
pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
#[cfg(not(any(fuzzing, test)))]
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
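// Worked example (illustrative, not part of the original source): before we, as the channel
// initiator, add an outbound HTLC, the spare balance is checked against the commitment fee at
// `FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE` times the current feerate. At 1000 sat/kW with one
// extra non-dust HTLC on a non-anchor channel, that buffer is roughly
//
//     2 * 1000 * (724 + 172) / 1000 = 1792 sats
//
// i.e. the sender must be able to afford about twice the present commitment fee.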
/// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
/// channel creation on an inbound channel, we simply force-close and move on.
/// This constant is the one suggested in BOLT 2.
pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;

/// In case of a concurrent update_add_htlc proposed by our counterparty, we might
/// not have enough balance value remaining to cover the onchain cost of this new
/// HTLC weight. If this happens, our counterparty fails the reception of our
/// commitment_signed including this new HTLC due to infringement on the channel
/// reserve.
/// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
/// size 2. However, if the number of concurrent update_add_htlc is higher, this still
/// leads to a channel force-close. Ultimately, this is an issue coming from the
/// design of LN state machines, allowing asynchronous updates.
pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
/// When a channel is opened, we check that the funding amount is enough to pay for relevant
/// commitment transaction fees, with at least this many HTLCs present on the commitment
/// transaction (not counting the value of the HTLCs themselves).
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;

/// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
/// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
/// ChannelUpdate prompted by the config update. This value was determined as follows:
///
///   * The expected interval between ticks (1 minute).
///   * The average convergence delay of updates across the network, i.e., ~300 seconds on average
///      for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
///   * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
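// i.e. EXPIRE_PREV_CONFIG_TICKS = convergence_delay / tick_interval = ~300s / 60s = 5 ticks.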
/// The number of ticks that may elapse while we're waiting for a response to a
/// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
/// them.
///
/// See [`ChannelContext::sent_message_awaiting_response`] for more information.
pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;

/// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
/// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
/// exceeding this age limit will be force-closed and purged from memory.
pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;

/// Number of blocks needed for an output from a coinbase transaction to be spendable.
pub(crate) const COINBASE_MATURITY: u32 = 100;
struct PendingChannelMonitorUpdate {
	update: ChannelMonitorUpdate,
}

impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
	(0, update, required),
});

/// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
/// its variants containing an appropriate channel struct.
pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
	UnfundedOutboundV1(OutboundV1Channel<SP>),
	UnfundedInboundV1(InboundV1Channel<SP>),
	Funded(Channel<SP>),
}

impl<'a, SP: Deref> ChannelPhase<SP> where
	SP::Target: SignerProvider,
	<SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
{
	pub fn context(&'a self) -> &'a ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(chan) => &chan.context,
			ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
			ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
		}
	}

	pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
		}
	}
}
/// Contains all state common to unfunded inbound/outbound channels.
pub(super) struct UnfundedChannelContext {
	/// A counter tracking how many ticks have elapsed since this unfunded channel was
	/// created. If this unfunded channel reaches `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS` and the peer
	/// has yet to respond, it will be force-closed and purged from memory.
	///
	/// This is so that we don't keep channels around that haven't progressed to a funded state
	/// in a timely manner.
	unfunded_channel_age_ticks: usize,
}

impl UnfundedChannelContext {
	/// Determines whether we should force-close and purge this unfunded channel from memory due to it
	/// having reached the unfunded channel age limit.
	///
	/// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
	pub fn should_expire_unfunded_channel(&mut self) -> bool {
		self.unfunded_channel_age_ticks += 1;
		self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
	}
}
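// Usage sketch (illustrative, not part of the original source): the timer path is expected to
// call this once per tick and act on the result, roughly
//
//     if unfunded_context.should_expire_unfunded_channel() {
//         // force-close and forget the channel; it never reached a funded state in time
//     }
//
// Note the method both advances the tick counter and reports expiry, so it should be called
// exactly once per timer tick.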
/// Contains everything about the channel including state, and various flags.
pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
	config: LegacyChannelConfig,

	// Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
	// constructed using it. The second element in the tuple corresponds to the number of ticks that
	// have elapsed since the update occurred.
	prev_config: Option<(ChannelConfig, usize)>,

	inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,

	/// The current channel ID.
	channel_id: ChannelId,
	/// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
	/// Will be `None` for channels created prior to 0.0.115.
	temporary_channel_id: Option<ChannelId>,
	channel_state: ChannelState,

	// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
	// our peer. However, we want to make sure they received it, or else rebroadcast it when we
	// reconnect.
	// We do so here, see `AnnouncementSigsSent` for more details on the state(s).
	// Note that a number of our tests were written prior to the behavior here which retransmits
	// AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
	// tests.
	#[cfg(any(test, feature = "_test_utils"))]
	pub(crate) announcement_sigs_state: AnnouncementSigsState,
	#[cfg(not(any(test, feature = "_test_utils")))]
	announcement_sigs_state: AnnouncementSigsState,

	secp_ctx: Secp256k1<secp256k1::All>,
	channel_value_satoshis: u64,

	latest_monitor_update_id: u64,

	holder_signer: ChannelSignerType<SP>,
	shutdown_scriptpubkey: Option<ShutdownScript>,
	destination_script: ScriptBuf,
1251 // Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
1252 // generation start at 0 and count up...this simplifies some parts of implementation at the
1253 // cost of others, but should really just be changed.
1255 cur_holder_commitment_transaction_number: u64,
1256 cur_counterparty_commitment_transaction_number: u64,
1257 value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
1258 pending_inbound_htlcs: Vec<InboundHTLCOutput>,
1259 pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
1260 holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
1262 /// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
1263 /// need to ensure we resend them in the order we originally generated them. Note that because
1264 /// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
1265 /// sufficient to simply set this to the opposite of any message we are generating as we
1266 /// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
1267 /// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
1269 resend_order: RAACommitmentOrder,
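// Illustrative example (added annotation, not original source): if, before a disconnection or
// monitor-update pause, we generated a revoke_and_ack and then a commitment_signed, generating
// the commitment_signed set this to the RAA-first variant, so on reconnect we resend the RAA
// before the commitment_signed and the peer sees the messages in their original order.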
1271 monitor_pending_channel_ready: bool,
1272 monitor_pending_revoke_and_ack: bool,
1273 monitor_pending_commitment_signed: bool,
1275 // TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
1276 // responsible for some of the HTLCs here or not - we don't know whether the update in question
1277 // completed or not. We currently ignore these fields entirely when force-closing a channel,
1278 // but need to handle this somehow or we run the risk of losing HTLCs!
1279 monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
1280 monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
1281 monitor_pending_finalized_fulfills: Vec<HTLCSource>,
1283 /// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
1284 /// but our signer (initially) refused to give us a signature, we should retry at some point in
1285 /// the future when the signer indicates it may have a signature for us.
1287 /// This flag is set in such a case. Note that we don't need to persist this as we'll end up
1288 /// setting it again as a side-effect of [`Channel::channel_reestablish`].
1289 signer_pending_commitment_update: bool,
1290 /// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
1291 /// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
1292 /// outbound or inbound.
1293 signer_pending_funding: bool,
1295 // pending_update_fee is filled when sending and receiving update_fee.
1297 // Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
1298 // or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
1299 // generating new commitment transactions with exactly the same criteria as inbound/outbound
1300 // HTLCs with similar state.
1301 pending_update_fee: Option<(u32, FeeUpdateState)>,
1302 // If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
1303 // it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
1304 // `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
1305 // `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
1306 // further `send_update_fee` calls, dropping the previous holding cell update entirely.
1307 holding_cell_update_fee: Option<u32>,
1308 next_holder_htlc_id: u64,
1309 next_counterparty_htlc_id: u64,
1310 feerate_per_kw: u32,
1312 /// The timestamp set on our latest `channel_update` message for this channel. It is updated
1313 /// when the channel is updated in ways which may impact the `channel_update` message or when a
1314 /// new block is received, ensuring it's always at least moderately close to the current real
1315 /// time.
1316 update_time_counter: u32,
1318 #[cfg(debug_assertions)]
1319 /// Max to_local and to_remote outputs in a locally-generated commitment transaction
1320 holder_max_commitment_tx_output: Mutex<(u64, u64)>,
1321 #[cfg(debug_assertions)]
1322 /// Max to_local and to_remote outputs in a remote-generated commitment transaction
1323 counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,
1325 last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
1326 target_closing_feerate_sats_per_kw: Option<u32>,
1328 /// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
1329 /// update, we need to delay processing it until later. We do that here by simply storing the
1330 /// closing_signed message and handling it in `maybe_propose_closing_signed`.
1331 pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,
1333 /// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
1334 /// transaction. These are set once we reach `closing_negotiation_ready`.
1335 #[cfg(test)]
1336 pub(crate) closing_fee_limits: Option<(u64, u64)>,
1337 #[cfg(not(test))]
1338 closing_fee_limits: Option<(u64, u64)>,
1340 /// If we remove an HTLC (or fee update), commit, and receive our counterparty's
1341 /// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
1342 /// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
1343 /// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
1344 /// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
1346 /// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
1347 /// until we see a `commitment_signed` before doing so.
1349 /// We don't bother to persist this - we anticipate this state won't last longer than a few
1350 /// milliseconds, so any accidental force-closes here should be exceedingly rare.
1351 expecting_peer_commitment_signed: bool,
1353 /// The hash of the block in which the funding transaction was included.
1354 funding_tx_confirmed_in: Option<BlockHash>,
1355 funding_tx_confirmation_height: u32,
1356 short_channel_id: Option<u64>,
1357 /// Either the height at which this channel was created or the height at which it was last
1358 /// serialized if it was serialized by versions prior to 0.0.103.
1359 /// We use this to close if funding is never broadcasted.
1360 channel_creation_height: u32,
1362 counterparty_dust_limit_satoshis: u64,
1364 #[cfg(test)]
1365 pub(super) holder_dust_limit_satoshis: u64,
1366 #[cfg(not(test))]
1367 holder_dust_limit_satoshis: u64,
1369 #[cfg(test)]
1370 pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
1371 #[cfg(not(test))]
1372 counterparty_max_htlc_value_in_flight_msat: u64,
1374 #[cfg(test)]
1375 pub(super) holder_max_htlc_value_in_flight_msat: u64,
1376 #[cfg(not(test))]
1377 holder_max_htlc_value_in_flight_msat: u64,
1379 /// minimum channel reserve for self to maintain - set by them.
1380 counterparty_selected_channel_reserve_satoshis: Option<u64>,
1382 #[cfg(test)]
1383 pub(super) holder_selected_channel_reserve_satoshis: u64,
1384 #[cfg(not(test))]
1385 holder_selected_channel_reserve_satoshis: u64,
1387 counterparty_htlc_minimum_msat: u64,
1388 holder_htlc_minimum_msat: u64,
1389 #[cfg(test)]
1390 pub counterparty_max_accepted_htlcs: u16,
1391 #[cfg(not(test))]
1392 counterparty_max_accepted_htlcs: u16,
1393 holder_max_accepted_htlcs: u16,
1394 minimum_depth: Option<u32>,
1396 counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
1398 pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
1399 funding_transaction: Option<Transaction>,
1400 is_batch_funding: Option<()>,
1402 counterparty_cur_commitment_point: Option<PublicKey>,
1403 counterparty_prev_commitment_point: Option<PublicKey>,
1404 counterparty_node_id: PublicKey,
1406 counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,
1408 commitment_secrets: CounterpartyCommitmentSecrets,
1410 channel_update_status: ChannelUpdateStatus,
1411 /// Once we reach `closing_negotiation_ready`, we set this, indicating that if closing_signed
1412 /// does not complete within a single timer tick (one minute), we should force-close the channel.
1413 /// This prevents us from keeping unusable channels around forever if our counterparty wishes
1414 /// to stall the closing_signed negotiation indefinitely.
1415 /// Note that this field is reset to false on deserialization to give us a chance to connect to
1416 /// our peer and start the closing_signed negotiation fresh.
1417 closing_signed_in_flight: bool,
1419 /// Our counterparty's channel_announcement signatures provided in announcement_signatures.
1420 /// This can be used to rebroadcast the channel_announcement message later.
1421 announcement_sigs: Option<(Signature, Signature)>,
1423 // We save these values so we can make sure `next_local_commit_tx_fee_msat` and
1424 // `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
1425 // be, by comparing the cached values to the fee of the transaction generated by
1426 // `build_commitment_transaction`.
1427 #[cfg(any(test, fuzzing))]
1428 next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
1429 #[cfg(any(test, fuzzing))]
1430 next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
1432 /// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
1433 /// they will not send a channel_reestablish until the channel locks in. Then, they will send a
1434 /// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
1435 /// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
1436 /// message until we receive a channel_reestablish.
1438 /// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
1439 pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,
1441 /// An option set when we wish to track how many ticks have elapsed while waiting for a response
1442 /// from our counterparty after sending a message. If the peer has yet to respond after reaching
1443 /// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
1444 /// unblock the state machine.
1446 /// This behavior is mostly motivated by an lnd bug in which we don't receive a message we expect
1447 /// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
1448 /// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
1450 /// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
1451 /// [`msgs::RevokeAndACK`] message from the counterparty.
1452 sent_message_awaiting_response: Option<usize>,
1454 #[cfg(any(test, fuzzing))]
1455 // When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
1456 // corresponding HTLC on the inbound path. If, then, the outbound path channel is
1457 // disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
1458 // messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
1459 // is fine, but as a sanity check in our failure to generate the second claim, we check here
1460 // that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
1461 historical_inbound_htlc_fulfills: HashSet<u64>,
1463 /// This channel's type, as negotiated during channel open
1464 channel_type: ChannelTypeFeatures,
1466 // Our counterparty can offer us SCID aliases which they will map to this channel when routing
1467 // outbound payments. These can be used in invoice route hints to avoid explicitly revealing
1468 // the channel's funding UTXO.
1470 // We also use this when sending our peer a channel_update that isn't to be broadcasted
1471 // publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
1472 // associated channel mapping.
1474 // We only bother storing the most recent SCID alias at any time, though our counterparty has
1475 // to store all of them.
1476 latest_inbound_scid_alias: Option<u64>,
1478 // We always offer our counterparty a static SCID alias, which we recognize as for this channel
1479 // if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
1480 // don't currently support node id aliases and eventually privacy should be provided with
1481 // blinded paths instead of simple scid+node_id aliases.
1482 outbound_scid_alias: u64,
1484 // We track whether we already emitted a `ChannelPending` event.
1485 channel_pending_event_emitted: bool,
1487 // We track whether we already emitted a `ChannelReady` event.
1488 channel_ready_event_emitted: bool,
1490 /// `Some(())` if we initiated the shutdown of this channel.
1491 local_initiated_shutdown: Option<()>,
1493 /// The unique identifier used to re-derive the private key material for the channel through
1494 /// [`SignerProvider::derive_channel_signer`].
1496 channel_keys_id: [u8; 32],
1498 pub channel_keys_id: [u8; 32],
1500 /// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
1501 /// store it here and only release it to the `ChannelManager` once it asks for it.
1502 blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
1505 impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
1506 /// Allowed in any state (including after shutdown)
1507 pub fn get_update_time_counter(&self) -> u32 {
1508 self.update_time_counter
1511 pub fn get_latest_monitor_update_id(&self) -> u64 {
1512 self.latest_monitor_update_id
1515 pub fn should_announce(&self) -> bool {
1516 self.config.announced_channel
1519 pub fn is_outbound(&self) -> bool {
1520 self.channel_transaction_parameters.is_outbound_from_holder
1523 /// Gets the fee we'd want to charge for adding an HTLC output to this Channel
1524 /// Allowed in any state (including after shutdown)
1525 pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
1526 self.config.options.forwarding_fee_base_msat
1529 /// Returns true if we've ever received a message from the remote end for this Channel
1530 pub fn have_received_message(&self) -> bool {
1531 self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
1534 /// Returns true if this channel is fully established and not known to be closing.
1535 /// Allowed in any state (including after shutdown)
1536 pub fn is_usable(&self) -> bool {
1537 matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
1538 !self.channel_state.is_local_shutdown_sent() &&
1539 !self.channel_state.is_remote_shutdown_sent() &&
1540 !self.monitor_pending_channel_ready
1543 /// Returns the state of the channel in its various stages of shutdown.
1544 pub fn shutdown_state(&self) -> ChannelShutdownState {
1545 match self.channel_state {
1546 ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
1547 if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
1548 ChannelShutdownState::ShutdownInitiated
1549 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
1550 ChannelShutdownState::ResolvingHTLCs
1551 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
1552 ChannelShutdownState::NegotiatingClosingFee
1554 ChannelShutdownState::NotShuttingDown
1556 ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
1557 _ => ChannelShutdownState::NotShuttingDown,
1561 fn closing_negotiation_ready(&self) -> bool {
1562 let is_ready_to_close = match self.channel_state {
1563 ChannelState::AwaitingChannelReady(flags) =>
1564 flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1565 ChannelState::ChannelReady(flags) =>
1566 flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1569 self.pending_inbound_htlcs.is_empty() &&
1570 self.pending_outbound_htlcs.is_empty() &&
1571 self.pending_update_fee.is_none() &&
1575 /// Returns true if this channel is currently available for use. This is a superset of
1576 /// is_usable() and considers things like the channel being temporarily disabled.
1577 /// Allowed in any state (including after shutdown)
1578 pub fn is_live(&self) -> bool {
1579 self.is_usable() && !self.channel_state.is_peer_disconnected()
1582 // Public utilities:
1584 pub fn channel_id(&self) -> ChannelId {
1588 /// Returns the `temporary_channel_id` used during channel establishment.
1590 /// Will return `None` for channels created prior to LDK version 0.0.115.
1591 pub fn temporary_channel_id(&self) -> Option<ChannelId> {
1592 self.temporary_channel_id
1595 pub fn minimum_depth(&self) -> Option<u32> {
1599 /// Gets the "user_id" value passed into the construction of this channel. It has no special
1600 /// meaning and exists only to allow users to have a persistent identifier of a channel.
1601 pub fn get_user_id(&self) -> u128 {
1605 /// Gets the channel's type
1606 pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
1610 /// Gets the channel's `short_channel_id`.
1612 /// Will return `None` if the channel hasn't been confirmed yet.
1613 pub fn get_short_channel_id(&self) -> Option<u64> {
1614 self.short_channel_id
1617 /// Allowed in any state (including after shutdown)
1618 pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
1619 self.latest_inbound_scid_alias
1622 /// Allowed in any state (including after shutdown)
1623 pub fn outbound_scid_alias(&self) -> u64 {
1624 self.outbound_scid_alias
1627 /// Returns the holder signer for this channel.
1629 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
1630 return &self.holder_signer
1633 /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
1634 /// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases
1635 /// or prior to any channel actions during `Channel` initialization.
1636 pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
1637 debug_assert_eq!(self.outbound_scid_alias, 0);
1638 self.outbound_scid_alias = outbound_scid_alias;
1641 /// Returns the funding_txo we either got from our peer, or were given by
1642 /// get_funding_created.
1643 pub fn get_funding_txo(&self) -> Option<OutPoint> {
1644 self.channel_transaction_parameters.funding_outpoint
1647 /// Returns the height at which our funding transaction was confirmed.
1648 pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
1649 let conf_height = self.funding_tx_confirmation_height;
1650 if conf_height > 0 {
1657 /// Returns the block hash in which our funding transaction was confirmed.
1658 pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
1659 self.funding_tx_confirmed_in
1662 /// Returns the current number of confirmations on the funding transaction.
1663 pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
1664 if self.funding_tx_confirmation_height == 0 {
1665 // We either haven't seen any confirmation yet, or observed a reorg.
1669 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
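// Worked example (added annotation, not original source): if the funding transaction confirmed
// at height 800_000 and `height` is 800_005, checked_sub yields 5 and we report 6
// confirmations; if `height` is below the confirmation height (e.g. mid-reorg), checked_sub
// fails and we report 0.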
1672 fn get_holder_selected_contest_delay(&self) -> u16 {
1673 self.channel_transaction_parameters.holder_selected_contest_delay
1676 fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
1677 &self.channel_transaction_parameters.holder_pubkeys
1680 pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
1681 self.channel_transaction_parameters.counterparty_parameters
1682 .as_ref().map(|params| params.selected_contest_delay)
1685 fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
1686 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
1689 /// Allowed in any state (including after shutdown)
1690 pub fn get_counterparty_node_id(&self) -> PublicKey {
1691 self.counterparty_node_id
1694 /// Allowed in any state (including after shutdown)
1695 pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
1696 self.holder_htlc_minimum_msat
1699 /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
1700 pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
1701 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
1704 /// Allowed in any state (including after shutdown)
1705 pub fn get_announced_htlc_max_msat(&self) -> u64 {
1707 // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
1708 // to use full capacity. This is an effort to reduce routing failures, because in many cases
1709 // a channel might have been used to route very small values (either by honest users or as DoS).
1710 self.channel_value_satoshis * 1000 * 9 / 10,
1712 self.counterparty_max_htlc_value_in_flight_msat
1716 /// Allowed in any state (including after shutdown)
1717 pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
1718 self.counterparty_htlc_minimum_msat
1721 /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
1722 pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
1723 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
1726 fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
1727 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
1728 let holder_reserve = self.holder_selected_channel_reserve_satoshis;
1730 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
1731 party_max_htlc_value_in_flight_msat
1736 pub fn get_value_satoshis(&self) -> u64 {
1737 self.channel_value_satoshis
1740 pub fn get_fee_proportional_millionths(&self) -> u32 {
1741 self.config.options.forwarding_fee_proportional_millionths
1744 pub fn get_cltv_expiry_delta(&self) -> u16 {
1745 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
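// Illustrative note (added annotation, not original source): a user-configured
// `cltv_expiry_delta` smaller than `MIN_CLTV_EXPIRY_DELTA` is clamped up to that minimum here,
// so e.g. a configured delta of 6 blocks is still enforced as `MIN_CLTV_EXPIRY_DELTA`.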
1748 pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
1749 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
1750 where F::Target: FeeEstimator
1752 match self.config.options.max_dust_htlc_exposure {
1753 MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
1754 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
1755 ConfirmationTarget::OnChainSweep) as u64;
1756 feerate_per_kw.saturating_mul(multiplier)
1758 MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
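// Worked example (added annotation, not original source; the multiplier value is an
// assumption): with `FeeRateMultiplier(5_000)` and an `OnChainSweep` estimate of
// 2_500 sat/kWU, the dust exposure limit would be 2_500 * 5_000 = 12_500_000 msat, while
// `FixedLimitMsat(limit)` ignores feerates entirely and uses `limit` as-is.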
1762 /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1763 pub fn prev_config(&self) -> Option<ChannelConfig> {
1764 self.prev_config.map(|prev_config| prev_config.0)
1767 // Checks whether we should emit a `ChannelPending` event.
1768 pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
1769 self.is_funding_broadcast() && !self.channel_pending_event_emitted
1772 // Returns whether we already emitted a `ChannelPending` event.
1773 pub(crate) fn channel_pending_event_emitted(&self) -> bool {
1774 self.channel_pending_event_emitted
1777 // Remembers that we already emitted a `ChannelPending` event.
1778 pub(crate) fn set_channel_pending_event_emitted(&mut self) {
1779 self.channel_pending_event_emitted = true;
1782 // Checks whether we should emit a `ChannelReady` event.
1783 pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
1784 self.is_usable() && !self.channel_ready_event_emitted
1787 // Remembers that we already emitted a `ChannelReady` event.
1788 pub(crate) fn set_channel_ready_event_emitted(&mut self) {
1789 self.channel_ready_event_emitted = true;
1792 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1793 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1794 /// no longer be considered when forwarding HTLCs.
1795 pub fn maybe_expire_prev_config(&mut self) {
1796 if self.prev_config.is_none() {
1799 let prev_config = self.prev_config.as_mut().unwrap();
1801 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1802 self.prev_config = None;
1806 /// Returns the current [`ChannelConfig`] applied to the channel.
1807 pub fn config(&self) -> ChannelConfig {
1811 /// Updates the channel's config. Returns a bool indicating whether the applied config update
1812 /// requires a new `ChannelUpdate` message to be broadcast.
1813 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1814 let did_channel_update =
1815 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1816 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1817 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1818 if did_channel_update {
1819 self.prev_config = Some((self.config.options, 0));
1820 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1821 // policy change to propagate throughout the network.
1822 self.update_time_counter += 1;
1824 self.config.options = *config;
1828 /// Returns true if funding_signed was sent/received and the
1829 /// funding transaction has been broadcast if necessary.
1830 pub fn is_funding_broadcast(&self) -> bool {
1831 !self.channel_state.is_pre_funded_state() &&
1832 !matches!(self.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH))
1835 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1836 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1837 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1838 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
1839 /// such, a transaction is generally the result of b increasing the amount paid to a (or adding
1840 /// an HTLC to it).
1841 /// @local is used only to convert relevant internal structures which refer to remote vs local
1842 /// to decide value of outputs and direction of HTLCs.
1843 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1844 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1845 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1846 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1847 /// which peer generated this transaction and "to whom" this transaction flows.
1849 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1850 where L::Target: Logger
1852 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1853 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1854 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
1856 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1857 let mut remote_htlc_total_msat = 0;
1858 let mut local_htlc_total_msat = 0;
1859 let mut value_to_self_msat_offset = 0;
1861 let mut feerate_per_kw = self.feerate_per_kw;
1862 if let Some((feerate, update_state)) = self.pending_update_fee {
1863 if match update_state {
1864 // Note that these match the inclusion criteria when scanning
1865 // pending_inbound_htlcs below.
1866 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1867 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1868 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
1870 feerate_per_kw = feerate;
1874 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1875 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
1876 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
1878 if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
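// Background note (added annotation, not original source): per BOLT 3 the 48-bit commitment
// number is obscured by XORing it with the lower 48 bits of
// SHA256(open_channel payment_basepoint || accept_channel payment_basepoint); that XOR mask is
// what `get_commitment_transaction_number_obscure_factor` derives from the two payment points
// passed above.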
1880 macro_rules! get_htlc_in_commitment {
1881 ($htlc: expr, $offered: expr) => {
1882 HTLCOutputInCommitment {
1884 amount_msat: $htlc.amount_msat,
1885 cltv_expiry: $htlc.cltv_expiry,
1886 payment_hash: $htlc.payment_hash,
1887 transaction_output_index: None
1892 macro_rules! add_htlc_output {
1893 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1894 if $outbound == local { // "offered HTLC output"
1895 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
1896 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1899 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
1901 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1902 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1903 included_non_dust_htlcs.push((htlc_in_tx, $source));
1905 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1906 included_dust_htlcs.push((htlc_in_tx, $source));
1909 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
1910 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1913 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
1915 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1916 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1917 included_non_dust_htlcs.push((htlc_in_tx, $source));
1919 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1920 included_dust_htlcs.push((htlc_in_tx, $source));
1926 let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1928 for ref htlc in self.pending_inbound_htlcs.iter() {
1929 let (include, state_name) = match htlc.state {
1930 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1931 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1932 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1933 InboundHTLCState::Committed => (true, "Committed"),
1934 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1938 add_htlc_output!(htlc, false, None, state_name);
1939 remote_htlc_total_msat += htlc.amount_msat;
1941 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1943 &InboundHTLCState::LocalRemoved(ref reason) => {
1944 if generated_by_local {
1945 if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
1946 inbound_htlc_preimages.push(preimage);
1947 value_to_self_msat_offset += htlc.amount_msat as i64;
1957 let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1959 for ref htlc in self.pending_outbound_htlcs.iter() {
1960 let (include, state_name) = match htlc.state {
1961 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1962 OutboundHTLCState::Committed => (true, "Committed"),
1963 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1964 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1965 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1968 let preimage_opt = match htlc.state {
1969 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1970 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1971 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1975 if let Some(preimage) = preimage_opt {
1976 outbound_htlc_preimages.push(preimage);
1980 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1981 local_htlc_total_msat += htlc.amount_msat;
1983 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1985 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1986 value_to_self_msat_offset -= htlc.amount_msat as i64;
1988 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1989 if !generated_by_local {
1990 value_to_self_msat_offset -= htlc.amount_msat as i64;
1998 let value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1999 assert!(value_to_self_msat >= 0);
2000 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
2001 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
2002 // "violate" their reserve value by couting those against it. Thus, we have to convert
2003 // everything to i64 before subtracting as otherwise we can overflow.
2004 let value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
2005 assert!(value_to_remote_msat >= 0);
2007 #[cfg(debug_assertions)]
2009 // Make sure that the to_self/to_remote is always either past the appropriate
2010 // channel_reserve *or* it is making progress towards it.
2011 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
2012 self.holder_max_commitment_tx_output.lock().unwrap()
2014 self.counterparty_max_commitment_tx_output.lock().unwrap()
2016 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
2017 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
2018 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
2019 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
2022 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
2023 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
2024 let (value_to_self, value_to_remote) = if self.is_outbound() {
2025 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
2027 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
2030 let mut value_to_a = if local { value_to_self } else { value_to_remote };
2031 let mut value_to_b = if local { value_to_remote } else { value_to_self };
2032 let (funding_pubkey_a, funding_pubkey_b) = if local {
2033 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
2035 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
2038 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
2039 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
2044 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
2045 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
2050 let num_nondust_htlcs = included_non_dust_htlcs.len();
2052 let channel_parameters =
2053 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
2054 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
2055 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
2062 &mut included_non_dust_htlcs,
2065 let mut htlcs_included = included_non_dust_htlcs;
2066 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
2067 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
2068 htlcs_included.append(&mut included_dust_htlcs);
2076 local_balance_msat: value_to_self_msat as u64,
2077 remote_balance_msat: value_to_remote_msat as u64,
2078 inbound_htlc_preimages,
2079 outbound_htlc_preimages,
2084 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
2085 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
2086 /// our counterparty!)
2087 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
2088 /// TODO Some magic rust shit to compile-time check this?
2089 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
2090 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
2091 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
2092 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
2093 let counterparty_pubkeys = self.get_counterparty_pubkeys();
2095 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
2099 /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
2100 /// will sign and send to our counterparty.
2101 /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
2102 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
2103 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
2104 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
2105 let counterparty_pubkeys = self.get_counterparty_pubkeys();
2107 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
2110 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
2111 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
2112 /// Panics if called before accept_channel/InboundV1Channel::new
2113 pub fn get_funding_redeemscript(&self) -> ScriptBuf {
2114 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
2117 fn counterparty_funding_pubkey(&self) -> &PublicKey {
2118 &self.get_counterparty_pubkeys().funding_pubkey
2121 pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
2125 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
2126 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
2127 // may, at any point, increase by at least 10 sat/vB (i.e. 2530 sat/kWU) or 25%,
2128 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
2129 // more dust balance if the feerate increases when we have several HTLCs pending
2130 // which are near the dust limit.
2131 let mut feerate_per_kw = self.feerate_per_kw;
2132 // If there's a pending update fee, use it to ensure we aren't under-estimating
2133 // potential feerate updates coming soon.
2134 if let Some((feerate, _)) = self.pending_update_fee {
2135 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
2137 if let Some(feerate) = outbound_feerate_update {
2138 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
2140 let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
2141 cmp::max(feerate_per_kw + 2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
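// Worked example (added annotation, not original source): at a current feerate of
// 1_000 sat/kWU the buffer feerate is max(1_000 + 2_530, 1_250) = 3_530 sat/kWU (the
// +10 sat/vB floor dominates); at 20_000 sat/kWU it is max(22_530, 25_000) = 25_000 sat/kWU
// (the +25% term dominates).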
2144 /// Get forwarding information for the counterparty.
2145 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
2146 self.counterparty_forwarding_info.clone()
2149 /// Returns an HTLCStats about pending inbound HTLCs.
2150 fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
2151 let context = self;
2152 let mut stats = HTLCStats {
2153 pending_htlcs: context.pending_inbound_htlcs.len() as u32,
2154 pending_htlcs_value_msat: 0,
2155 on_counterparty_tx_dust_exposure_msat: 0,
2156 on_holder_tx_dust_exposure_msat: 0,
2157 holding_cell_msat: 0,
2158 on_holder_tx_holding_cell_htlcs_count: 0,
2161 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2164 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
2165 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
2166 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
2168 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
2169 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
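// Worked example (added annotation, not original source; the weight constant is an
// assumption): on a non-anchor channel with a dust buffer feerate of 3_530 sat/kWU and an
// HTLC-success transaction weight of roughly 703 WU, `htlc_success_dust_limit` is about
// 3_530 * 703 / 1_000 = ~2_481 sat, so inbound HTLCs below roughly
// `holder_dust_limit_satoshis + 2_481` sat count toward our on-holder-tx dust exposure.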
2170 for ref htlc in context.pending_inbound_htlcs.iter() {
2171 stats.pending_htlcs_value_msat += htlc.amount_msat;
2172 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
2173 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
2175 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
2176 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
2182 /// Returns an HTLCStats about pending outbound HTLCs, *including* pending adds in our holding cell.
2183 fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
2184 let context = self;
2185 let mut stats = HTLCStats {
2186 pending_htlcs: context.pending_outbound_htlcs.len() as u32,
2187 pending_htlcs_value_msat: 0,
2188 on_counterparty_tx_dust_exposure_msat: 0,
2189 on_holder_tx_dust_exposure_msat: 0,
2190 holding_cell_msat: 0,
2191 on_holder_tx_holding_cell_htlcs_count: 0,
2194 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2197 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
2198 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
2199 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
2201 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
2202 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
2203 for ref htlc in context.pending_outbound_htlcs.iter() {
2204 stats.pending_htlcs_value_msat += htlc.amount_msat;
2205 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
2206 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
2208 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
2209 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
2213 for update in context.holding_cell_htlc_updates.iter() {
2214 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
2215 stats.pending_htlcs += 1;
2216 stats.pending_htlcs_value_msat += amount_msat;
2217 stats.holding_cell_msat += amount_msat;
2218 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
2219 stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
2221 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
2222 stats.on_holder_tx_dust_exposure_msat += amount_msat;
2224 stats.on_holder_tx_holding_cell_htlcs_count += 1;
2231 /// Returns information on all pending inbound HTLCs.
2232 pub fn get_pending_inbound_htlc_details(&self) -> Vec<InboundHTLCDetails> {
2233 let mut holding_cell_states = new_hash_map();
2234 for holding_cell_update in self.holding_cell_htlc_updates.iter() {
2235 match holding_cell_update {
2236 HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2237 holding_cell_states.insert(
2239 InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill,
2242 HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2243 holding_cell_states.insert(
2245 InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail,
2248 HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } => {
2249 holding_cell_states.insert(
2251 InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail,
2255 HTLCUpdateAwaitingACK::AddHTLC { .. } => {},
2258 let mut inbound_details = Vec::new();
2259 let htlc_success_dust_limit = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2262 let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
2263 dust_buffer_feerate * htlc_success_tx_weight(self.get_channel_type()) / 1000
2265 let holder_dust_limit_success_sat = htlc_success_dust_limit + self.holder_dust_limit_satoshis;
2266 for htlc in self.pending_inbound_htlcs.iter() {
2267 if let Some(state_details) = (&htlc.state).into() {
2268 inbound_details.push(InboundHTLCDetails{
2269 htlc_id: htlc.htlc_id,
2270 amount_msat: htlc.amount_msat,
2271 cltv_expiry: htlc.cltv_expiry,
2272 payment_hash: htlc.payment_hash,
2273 state: Some(holding_cell_states.remove(&htlc.htlc_id).unwrap_or(state_details)),
2274 is_dust: htlc.amount_msat / 1000 < holder_dust_limit_success_sat,
2281 /// Returns information on all pending outbound HTLCs.
2282 pub fn get_pending_outbound_htlc_details(&self) -> Vec<OutboundHTLCDetails> {
2283 let mut outbound_details = Vec::new();
2284 let htlc_timeout_dust_limit = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2287 let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
2288 dust_buffer_feerate * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
2290 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.holder_dust_limit_satoshis;
2291 for htlc in self.pending_outbound_htlcs.iter() {
2292 outbound_details.push(OutboundHTLCDetails{
2293 htlc_id: Some(htlc.htlc_id),
2294 amount_msat: htlc.amount_msat,
2295 cltv_expiry: htlc.cltv_expiry,
2296 payment_hash: htlc.payment_hash,
2297 skimmed_fee_msat: htlc.skimmed_fee_msat,
2298 state: Some((&htlc.state).into()),
2299 is_dust: htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat,
2302 for holding_cell_update in self.holding_cell_htlc_updates.iter() {
2303 if let HTLCUpdateAwaitingACK::AddHTLC {
2309 } = *holding_cell_update {
2310 outbound_details.push(OutboundHTLCDetails{
2312 amount_msat: amount_msat,
2313 cltv_expiry: cltv_expiry,
2314 payment_hash: payment_hash,
2315 skimmed_fee_msat: skimmed_fee_msat,
2316 state: Some(OutboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
2317 is_dust: amount_msat / 1000 < holder_dust_limit_timeout_sat,
2324 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
2325 /// Doesn't bother handling the
2326 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
2327 /// corner case properly.
2328 pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
2329 -> AvailableBalances
2330 where F::Target: FeeEstimator
2332 let context = &self;
2333 // Note that we have to handle overflow due to the above case.
2334 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
2335 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
2337 let mut balance_msat = context.value_to_self_msat;
2338 for ref htlc in context.pending_inbound_htlcs.iter() {
2339 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
2340 balance_msat += htlc.amount_msat;
2343 balance_msat -= outbound_stats.pending_htlcs_value_msat;
2345 let outbound_capacity_msat = context.value_to_self_msat
2346 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
2348 context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
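// Worked example (added annotation, not original source): with `value_to_self_msat` of
// 1_000_000_000, 100_000_000 msat of pending outbound HTLCs and a counterparty-selected
// reserve of 10_000 sat, outbound_capacity_msat = 1_000_000_000 - 100_000_000 - 10_000_000 =
// 890_000_000 msat; the saturating subtractions floor the result at zero instead of
// underflowing.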
2350 let mut available_capacity_msat = outbound_capacity_msat;
2352 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2353 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
2357 if context.is_outbound() {
2358 // We should mind channel commit tx fee when computing how much of the available capacity
2359 // can be used in the next htlc. Mirrors the logic in send_htlc.
2361 // The fee depends on whether the amount we will be sending is above dust or not,
2362 // and the answer will in turn change the amount itself — making it a circular
2363 // dependency.
2364 // This complicates the computation around dust-values, up to the one-htlc-value.
2365 let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
2366 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2367 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
2370 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
2371 let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
2372 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
2373 let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
2374 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2375 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2376 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
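// Note (added annotation, not original source; the multiple is an assumption): on non-anchor
// channels `FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE` (assumed 2x) doubles both reserved
// commitment fees here, leaving headroom for a feerate increase before the fee is subtracted
// from the spendable capacity below.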
2379 // We will first subtract the fee as if we were above-dust. Then, if the resulting
2380 // value ends up being below dust, we have this fee available again. In that case,
2381 // match the value to right-below-dust.
2382 let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
2383 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
2384 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
2385 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
2386 debug_assert!(one_htlc_difference_msat != 0);
2387 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
2388 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
2389 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
2391 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
2394 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
2395 // sending a new HTLC won't reduce their balance below our reserve threshold.
2396 let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
2397 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2398 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
2401 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
2402 let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
2404 let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
2405 let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
2406 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
2408 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
2409 // If another HTLC's fee would reduce the remote's balance below the reserve limit
2410 // we've selected for them, we can only send dust HTLCs.
2411 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
2415 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
2417 // If we get close to our maximum dust exposure, we end up in a situation where we can send
2418 // between zero and the remaining dust exposure limit remaining OR above the dust limit.
2419 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
2420 // send above the dust limit (as the router can always overpay to meet the dust limit).
2421 let mut remaining_msat_below_dust_exposure_limit = None;
2422 let mut dust_exposure_dust_limit_msat = 0;
2423 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
2425 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2426 (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
2428 let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
2429 (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2430 context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2432 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
2433 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2434 remaining_msat_below_dust_exposure_limit =
2435 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
2436 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
2439 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
2440 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2441 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
2442 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
2443 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
2444 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
2447 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
2448 if available_capacity_msat < dust_exposure_dust_limit_msat {
2449 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
2451 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
2455 available_capacity_msat = cmp::min(available_capacity_msat,
2456 context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
2458 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
2459 available_capacity_msat = 0;
2463 inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
2464 - context.value_to_self_msat as i64
2465 - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
2466 - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
2468 outbound_capacity_msat,
2469 next_outbound_htlc_limit_msat: available_capacity_msat,
2470 next_outbound_htlc_minimum_msat,
2475 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
2476 let context = &self;
2477 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
2480 /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
2481 /// number of pending HTLCs that are on track to be in our next commitment tx.
2483 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2484 /// `fee_spike_buffer_htlc` is `Some`.
2486 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2487 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2489 /// Dust HTLCs are excluded.
2490 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2491 let context = &self;
2492 assert!(context.is_outbound());
2494 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2497 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2498 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2500 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
2501 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
2503 let mut addl_htlcs = 0;
2504 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2506 HTLCInitiator::LocalOffered => {
2507 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2511 HTLCInitiator::RemoteOffered => {
2512 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2518 let mut included_htlcs = 0;
2519 for ref htlc in context.pending_inbound_htlcs.iter() {
2520 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
2523 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
2524 // transaction including this HTLC if it times out before they RAA.
2525 included_htlcs += 1;
2528 for ref htlc in context.pending_outbound_htlcs.iter() {
2529 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
2533 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
2534 OutboundHTLCState::Committed => included_htlcs += 1,
2535 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2536 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
2537 // transaction won't be generated until they send us their next RAA, which will mean
2538 // dropping any HTLCs in this state.
2543 for htlc in context.holding_cell_htlc_updates.iter() {
2545 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
2546 if amount_msat / 1000 < real_dust_limit_timeout_sat {
2551 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
2552 // ack we're guaranteed to never include them in commitment txs anymore.
2556 let num_htlcs = included_htlcs + addl_htlcs;
2557 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2558 #[cfg(any(test, fuzzing))]
2561 if fee_spike_buffer_htlc.is_some() {
2562 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2564 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
2565 + context.holding_cell_htlc_updates.len();
2566 let commitment_tx_info = CommitmentTxInfoCached {
2568 total_pending_htlcs,
2569 next_holder_htlc_id: match htlc.origin {
2570 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2571 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2573 next_counterparty_htlc_id: match htlc.origin {
2574 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2575 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2577 feerate: context.feerate_per_kw,
2579 *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
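// Illustrative sketch (hypothetical values, not upstream code): how an HTLC is classified as dust
// for the holder commitment fee above, assuming the usual non-anchor second-stage weights
// (663 WU for htlc-timeout, 703 WU for htlc-success):
//   feerate_per_kw = 253, holder_dust_limit_satoshis = 354
//   real_dust_limit_timeout_sat = 354 + 253 * 663 / 1000 = 354 + 167 = 521
// An offered (outbound) HTLC of 600 sat is therefore non-dust and adds one HTLC output's worth of
// weight to commit_tx_fee_msat, while a 500 sat HTLC is dust and is skipped.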
2584 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
2585 /// pending HTLCs that are on track to be in their next commitment tx
2587 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2588 /// `fee_spike_buffer_htlc` is `Some`.
2590 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2591 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2593 /// Dust HTLCs are excluded.
2594 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2595 let context = &self;
2596 assert!(!context.is_outbound());
2598 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2601 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2602 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2604 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
2605 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
2607 let mut addl_htlcs = 0;
2608 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2610 HTLCInitiator::LocalOffered => {
2611 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2615 HTLCInitiator::RemoteOffered => {
2616 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2622 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
3623 // non-dust inbound HTLCs are included (as all states imply it will be included) and only
3624 // the subset of outbound HTLCs which will also appear there, see below.
2625 let mut included_htlcs = 0;
2626 for ref htlc in context.pending_inbound_htlcs.iter() {
2627 if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
2630 included_htlcs += 1;
2633 for ref htlc in context.pending_outbound_htlcs.iter() {
2634 if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
3637 // We only include outbound HTLCs if they will be included in their next commitment_signed,
2638 // i.e. if they've responded to us with an RAA after announcement.
2640 OutboundHTLCState::Committed => included_htlcs += 1,
2641 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2642 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
2647 let num_htlcs = included_htlcs + addl_htlcs;
2648 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2649 #[cfg(any(test, fuzzing))]
2652 if fee_spike_buffer_htlc.is_some() {
2653 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2655 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
2656 let commitment_tx_info = CommitmentTxInfoCached {
2658 total_pending_htlcs,
2659 next_holder_htlc_id: match htlc.origin {
2660 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2661 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2663 next_counterparty_htlc_id: match htlc.origin {
2664 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2665 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2667 feerate: context.feerate_per_kw,
2669 *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
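// Illustrative note (not upstream code; hypothetical numbers): on the counterparty's commitment
// transaction the roles are mirrored - HTLCs they offered (our inbound) resolve via htlc-timeout
// and HTLCs we offered (our outbound) via htlc-success - and their dust limit applies. With the
// same hypothetical feerate of 253 sat/kW but counterparty_dust_limit_satoshis = 546, the success
// threshold is roughly 546 + 253 * 703 / 1000 = 546 + 177 = 723 sat, so a 700 sat outbound HTLC
// that is non-dust on our commitment can still be dust on theirs.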
2674 fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O> where F: Fn() -> Option<O> {
2675 match self.channel_state {
2676 ChannelState::FundingNegotiated => f(),
2677 ChannelState::AwaitingChannelReady(flags) =>
2678 if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) ||
2679 flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into())
2689 /// Returns the transaction if there is a pending funding transaction that is yet to be
2691 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
2692 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
2695 /// Returns the transaction ID if there is a pending funding transaction that is yet to be
2697 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
2698 self.if_unbroadcasted_funding(||
2699 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2703 /// Returns whether the channel is funded in a batch.
2704 pub fn is_batch_funding(&self) -> bool {
2705 self.is_batch_funding.is_some()
2708 /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
2710 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
2711 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2714 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
2715 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
2716 /// those explicitly stated to be allowed after shutdown completes, e.g. some simple getters).
2717 /// Also returns the list of payment_hashes for HTLCs which we can safely fail backwards
2718 /// immediately (others we will have to allow to time out).
2719 pub fn force_shutdown(&mut self, should_broadcast: bool, closure_reason: ClosureReason) -> ShutdownResult {
2720 // Note that we MUST only generate a monitor update that indicates force-closure - we're
2721 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
2722 // being fully configured in some cases. Thus, it's likely any monitor events we generate will
2723 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
2724 assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete));
2726 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
2727 // return them to fail the payment.
2728 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
2729 let counterparty_node_id = self.get_counterparty_node_id();
2730 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
2732 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
2733 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
2738 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
2739 // If we haven't yet exchanged funding signatures (i.e. channel_state < AwaitingChannelReady),
2740 // returning a channel monitor update here would imply a channel monitor update before
2741 // we even registered the channel monitor to begin with, which is invalid.
2742 // Thus, if we aren't actually at a point where we could conceivably broadcast the
2743 // funding transaction, don't return a funding txo (which prevents providing the
2744 // monitor update to the user, even if we return one).
2745 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
2746 if !self.channel_state.is_pre_funded_state() {
2747 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
2748 Some((self.get_counterparty_node_id(), funding_txo, self.channel_id(), ChannelMonitorUpdate {
2749 update_id: self.latest_monitor_update_id,
2750 counterparty_node_id: Some(self.counterparty_node_id),
2751 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
2752 channel_id: Some(self.channel_id()),
2756 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
2757 let unbroadcasted_funding_tx = self.unbroadcasted_funding();
2759 self.channel_state = ChannelState::ShutdownComplete;
2760 self.update_time_counter += 1;
2764 dropped_outbound_htlcs,
2765 unbroadcasted_batch_funding_txid,
2766 channel_id: self.channel_id,
2767 user_channel_id: self.user_id,
2768 channel_capacity_satoshis: self.channel_value_satoshis,
2769 counterparty_node_id: self.counterparty_node_id,
2770 unbroadcasted_funding_tx,
2771 channel_funding_txo: self.get_funding_txo(),
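// Illustrative sketch (hypothetical caller, not upstream code): the `ShutdownResult` built above
// is typically consumed by the layer that owns the channel, e.g. by failing back every holding
// cell HTLC that never made it into a commitment:
//
//   let shutdown_res = chan.context.force_shutdown(true, closure_reason);
//   for (htlc_source, payment_hash, _node_id, _channel_id) in shutdown_res.dropped_outbound_htlcs {
//       // fail each of these HTLCs backwards; they were never committed to on-chain
//   }
//   // and, when present, hand the force-close monitor update to the chain watcher.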
2775 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2776 fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
2777 let counterparty_keys = self.build_remote_transaction_keys();
2778 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
2780 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2781 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2782 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2783 &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2785 match &self.holder_signer {
2786 // TODO (arik): move match into calling method for Taproot
2787 ChannelSignerType::Ecdsa(ecdsa) => {
2788 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
2789 .map(|(signature, _)| msgs::FundingSigned {
2790 channel_id: self.channel_id(),
2793 partial_signature_with_nonce: None,
2797 if funding_signed.is_none() {
2798 #[cfg(not(async_signing))] {
2799 panic!("Failed to get signature for funding_signed");
2801 #[cfg(async_signing)] {
2802 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
2803 self.signer_pending_funding = true;
2805 } else if self.signer_pending_funding {
2806 log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
2807 self.signer_pending_funding = false;
2810 // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
2811 (counterparty_initial_commitment_tx, funding_signed)
2813 // TODO (taproot|arik)
2820 // Internal utility functions for channels
2822 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2823 /// `channel_value_satoshis` in msat, set through
2824 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2826 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
2828 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
2829 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
2830 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
2832 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
2835 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
2837 channel_value_satoshis * 10 * configured_percent
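// Illustrative example (hypothetical numbers): for a 1_000_000 sat channel with
// `max_inbound_htlc_value_in_flight_percent_of_channel` = 10, this returns
// 1_000_000 * 10 * 10 = 100_000_000 msat, i.e. 100_000 sat or 10% of the channel value
// (the `* 10` turns "sats * percent" into msat, since sats * 1000 / 100 == sats * 10).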
2840 /// Returns a minimum channel reserve value the remote needs to maintain,
2841 /// required by us according to the configured or default
2842 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2844 /// Guaranteed to return a value no larger than channel_value_satoshis
2846 /// This is used both for outbound and inbound channels and has lower bound
2847 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
2848 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2849 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
2850 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
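// Illustrative example (hypothetical numbers): with a 1_000_000 sat channel and
// `their_channel_reserve_proportional_millionths` = 10_000 (i.e. 1%), the calculated reserve is
// 1_000_000 * 10_000 / 1_000_000 = 10_000 sat, which sits above the MIN_THEIR_CHAN_RESERVE_SATOSHIS
// floor and below the channel value, so 10_000 sat is returned.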
2853 /// This is for legacy reasons, present for forward-compatibility.
2854 /// LDK versions older than 0.0.104 don't know how to read/handle values other than default
2855 /// from storage. Hence, we use this function to not persist default values of
2856 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
2857 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
2858 let (q, _) = channel_value_satoshis.overflowing_div(100);
2859 cmp::min(channel_value_satoshis, cmp::max(q, 1000))
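// Illustrative example (hypothetical numbers): for a 50_000 sat channel the quotient is
// 50_000 / 100 = 500 sat, which is below the 1000 sat floor, so 1000 sat is returned; a
// 1_000_000 sat channel yields max(10_000, 1000) = 10_000 sat.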
2862 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2863 // Note that num_htlcs should not include dust HTLCs.
2865 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2866 feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
2869 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2870 // Note that num_htlcs should not include dust HTLCs.
2871 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2872 // Note that we need to divide before multiplying to round properly,
2873 // since the lowest denomination of bitcoin on-chain is the satoshi.
2874 (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
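// Illustrative example (hypothetical numbers, assuming the usual non-anchor weights of 724 WU
// base and 172 WU per HTLC): with feerate_per_kw = 253 and 2 non-dust HTLCs,
//   (724 + 2 * 172) * 253 = 1068 * 253 = 270_204
//   270_204 / 1000 * 1000 = 270_000 msat
// i.e. the msat result is always a whole number of satoshis thanks to the divide-then-multiply.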
2877 /// Context for dual-funded channels.
2878 #[cfg(dual_funding)]
2879 pub(super) struct DualFundingChannelContext {
2880 /// The amount in satoshis we will be contributing to the channel.
2881 pub our_funding_satoshis: u64,
2882 /// The amount in satoshis our counterparty will be contributing to the channel.
2883 pub their_funding_satoshis: u64,
2884 /// The funding transaction locktime suggested by the initiator. If set by us, it is always set
2885 /// to the current block height to align incentives against fee-sniping.
2886 pub funding_tx_locktime: u32,
2887 /// The feerate set by the initiator to be used for the funding transaction.
2888 pub funding_feerate_sat_per_1000_weight: u32,
2891 // Holder designates channel data owned for the benefit of the user client.
2892 // Counterparty designates channel data owned by the other channel participant entity.
2893 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
2894 pub context: ChannelContext<SP>,
2897 #[cfg(any(test, fuzzing))]
2898 struct CommitmentTxInfoCached {
2900 total_pending_htlcs: usize,
2901 next_holder_htlc_id: u64,
2902 next_counterparty_htlc_id: u64,
2906 /// Contents of a wire message that fails an HTLC backwards. Useful for [`Channel::fail_htlc`] to
2907 /// fail with either [`msgs::UpdateFailMalformedHTLC`] or [`msgs::UpdateFailHTLC`] as needed.
2908 trait FailHTLCContents {
2909 type Message: FailHTLCMessageName;
2910 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message;
2911 fn to_inbound_htlc_state(self) -> InboundHTLCState;
2912 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK;
2914 impl FailHTLCContents for msgs::OnionErrorPacket {
2915 type Message = msgs::UpdateFailHTLC;
2916 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
2917 msgs::UpdateFailHTLC { htlc_id, channel_id, reason: self }
2919 fn to_inbound_htlc_state(self) -> InboundHTLCState {
2920 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(self))
2922 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
2923 HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet: self }
2926 impl FailHTLCContents for ([u8; 32], u16) {
2927 type Message = msgs::UpdateFailMalformedHTLC;
2928 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
2929 msgs::UpdateFailMalformedHTLC {
2932 sha256_of_onion: self.0,
2933 failure_code: self.1
2936 fn to_inbound_htlc_state(self) -> InboundHTLCState {
2937 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(self))
2939 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
2940 HTLCUpdateAwaitingACK::FailMalformedHTLC {
2942 sha256_of_onion: self.0,
2943 failure_code: self.1
2948 trait FailHTLCMessageName {
2949 fn name() -> &'static str;
2951 impl FailHTLCMessageName for msgs::UpdateFailHTLC {
2952 fn name() -> &'static str {
2956 impl FailHTLCMessageName for msgs::UpdateFailMalformedHTLC {
2957 fn name() -> &'static str {
2958 "update_fail_malformed_htlc"
2962 impl<SP: Deref> Channel<SP> where
2963 SP::Target: SignerProvider,
2964 <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
2966 fn check_remote_fee<F: Deref, L: Deref>(
2967 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
2968 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
2969 ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
2971 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
2972 ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
2974 ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
2976 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
2977 if feerate_per_kw < lower_limit {
2978 if let Some(cur_feerate) = cur_feerate_per_kw {
2979 if feerate_per_kw > cur_feerate {
2981 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
2982 cur_feerate, feerate_per_kw);
2986 return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
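// Illustrative note (hypothetical numbers): if our fee estimator's lower bound for the relevant
// confirmation target is 253 sat/kW and the peer proposes 200 sat/kW, the update is rejected with
// a `ChannelError::Close` - unless we are already at, say, 150 sat/kW, in which case the proposal
// is an improvement over the status quo and is accepted with only a warning log.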
2992 fn get_closing_scriptpubkey(&self) -> ScriptBuf {
2993 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
2994 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
2995 // outside of those situations will fail.
2996 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
3000 fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
3005 1 + // script length (0)
3009 )*4 + // * 4 for non-witness parts
3010 2 + // witness marker and flag
3011 1 + // witness element count
3012 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
3013 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
3014 2*(1 + 71); // two signatures + sighash type flags
3015 if let Some(spk) = a_scriptpubkey {
3016 ret += ((8+1) + // output values and script length
3017 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
3019 if let Some(spk) = b_scriptpubkey {
3020 ret += ((8+1) + // output values and script length
3021 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
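// Illustrative example (hypothetical script sizes): each kept output contributes
// (8 + 1 + spk.len()) * 4 weight units, so a P2WPKH shutdown script (22 bytes) adds
// (9 + 22) * 4 = 124 WU and a P2WSH script (34 bytes) adds (9 + 34) * 4 = 172 WU on top of the
// fixed witness-spend overhead computed above.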
3027 fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
3028 assert!(self.context.pending_inbound_htlcs.is_empty());
3029 assert!(self.context.pending_outbound_htlcs.is_empty());
3030 assert!(self.context.pending_update_fee.is_none());
3032 let mut total_fee_satoshis = proposed_total_fee_satoshis;
3033 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
3034 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
3036 if value_to_holder < 0 {
3037 assert!(self.context.is_outbound());
3038 total_fee_satoshis += (-value_to_holder) as u64;
3039 } else if value_to_counterparty < 0 {
3040 assert!(!self.context.is_outbound());
3041 total_fee_satoshis += (-value_to_counterparty) as u64;
3044 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
3045 value_to_counterparty = 0;
3048 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
3049 value_to_holder = 0;
3052 assert!(self.context.shutdown_scriptpubkey.is_some());
3053 let holder_shutdown_script = self.get_closing_scriptpubkey();
3054 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
3055 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
3057 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
3058 (closing_transaction, total_fee_satoshis)
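// Illustrative example (hypothetical numbers): for a 1_000_000 sat channel with
// value_to_self_msat = 600_000_000 where we are the outbound (fee-paying) side and propose a
// 1_000 sat fee, the holder output is 600_000 - 1_000 = 599_000 sat and the counterparty output is
// 400_000 sat. If either amount were at or below holder_dust_limit_satoshis (or
// skip_remote_output were set), that output would be dropped entirely rather than trimmed.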
3061 fn funding_outpoint(&self) -> OutPoint {
3062 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
3065 /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
3068 /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
3069 /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
3071 /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
3073 pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
3074 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
3075 where L::Target: Logger {
3076 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
3077 // (see equivalent if condition there).
3078 assert!(!self.context.channel_state.can_generate_new_commitment());
3079 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
3080 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
3081 self.context.latest_monitor_update_id = mon_update_id;
3082 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
3083 assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
3087 fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
3088 // Either ChannelReady got set (which means it won't be unset) or there is no way any
3089 // caller thought we could have something claimed (cause we wouldn't have accepted in an
3090 // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
3092 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3093 panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
3096 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
3097 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
3098 // these, but for now we just have to treat them as normal.
3100 let mut pending_idx = core::usize::MAX;
3101 let mut htlc_value_msat = 0;
3102 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
3103 if htlc.htlc_id == htlc_id_arg {
3104 debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
3105 log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
3106 htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
3108 InboundHTLCState::Committed => {},
3109 InboundHTLCState::LocalRemoved(ref reason) => {
3110 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3112 log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
3113 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
3115 return UpdateFulfillFetch::DuplicateClaim {};
3118 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
3119 // Don't return in release mode here so that we can update channel_monitor
3123 htlc_value_msat = htlc.amount_msat;
3127 if pending_idx == core::usize::MAX {
3128 #[cfg(any(test, fuzzing))]
3129 // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
3130 // this is simply a duplicate claim, not previously failed and we lost funds.
3131 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
3132 return UpdateFulfillFetch::DuplicateClaim {};
3135 // Now update local state:
3137 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
3138 // can claim it even if the channel hits the chain before we see their next commitment.
3139 self.context.latest_monitor_update_id += 1;
3140 let monitor_update = ChannelMonitorUpdate {
3141 update_id: self.context.latest_monitor_update_id,
3142 counterparty_node_id: Some(self.context.counterparty_node_id),
3143 updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
3144 payment_preimage: payment_preimage_arg.clone(),
3146 channel_id: Some(self.context.channel_id()),
3149 if !self.context.channel_state.can_generate_new_commitment() {
3150 // Note that this condition is the same as the assertion in
3151 // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
3152 // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
3153 // do not get into this branch.
3154 for pending_update in self.context.holding_cell_htlc_updates.iter() {
3155 match pending_update {
3156 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
3157 if htlc_id_arg == htlc_id {
3158 // Make sure we don't leave latest_monitor_update_id incremented here:
3159 self.context.latest_monitor_update_id -= 1;
3160 #[cfg(any(test, fuzzing))]
3161 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
3162 return UpdateFulfillFetch::DuplicateClaim {};
3165 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
3166 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
3168 if htlc_id_arg == htlc_id {
3169 log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
3170 // TODO: We may actually be able to switch to a fulfill here, though it's
3171 // rare enough it may not be worth the complexity burden.
3172 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
3173 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
3179 log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32());
3180 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
3181 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
3183 #[cfg(any(test, fuzzing))]
3184 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
3185 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
3187 #[cfg(any(test, fuzzing))]
3188 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
3191 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
3192 if let InboundHTLCState::Committed = htlc.state {
3194 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
3195 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
3197 log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
3198 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
3201 UpdateFulfillFetch::NewClaim {
3204 msg: Some(msgs::UpdateFulfillHTLC {
3205 channel_id: self.context.channel_id(),
3206 htlc_id: htlc_id_arg,
3207 payment_preimage: payment_preimage_arg,
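// Illustrative sketch (not upstream code): callers generally match on the result above to decide
// whether an `update_fulfill_htlc` can go out immediately or whether the claim only landed in the
// holding cell / monitor update:
//
//   match self.get_update_fulfill_htlc(htlc_id, preimage, logger) {
//       UpdateFulfillFetch::NewClaim { monitor_update, msg: Some(_), .. } => {
//           // preimage is in the monitor update and the fulfill message can be sent now
//       },
//       UpdateFulfillFetch::NewClaim { monitor_update, msg: None, .. } => {
//           // claim was queued in the holding cell; only the monitor update is actionable
//       },
//       UpdateFulfillFetch::DuplicateClaim {} => { /* nothing new to do */ },
//   }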
3212 pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
3213 let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
3214 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
3215 UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
3216 // Even if we aren't supposed to let new monitor updates with commitment state
3217 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
3218 // matter what. Sadly, to push a new monitor update which flies before others
3219 // already queued, we have to insert it into the pending queue and update the
3220 // update_ids of all the following monitors.
3221 if release_cs_monitor && msg.is_some() {
3222 let mut additional_update = self.build_commitment_no_status_check(logger);
3223 // build_commitment_no_status_check may bump latest_monitor_id but we want them
3224 // to be strictly increasing by one, so decrement it here.
3225 self.context.latest_monitor_update_id = monitor_update.update_id;
3226 monitor_update.updates.append(&mut additional_update.updates);
3228 let new_mon_id = self.context.blocked_monitor_updates.get(0)
3229 .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
3230 monitor_update.update_id = new_mon_id;
3231 for held_update in self.context.blocked_monitor_updates.iter_mut() {
3232 held_update.update.update_id += 1;
3235 debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
3236 let update = self.build_commitment_no_status_check(logger);
3237 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
3243 self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
3244 UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
3246 UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
3250 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
3251 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
3252 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
3253 /// before we fail backwards.
3255 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
3256 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
3257 /// [`ChannelError::Ignore`].
3258 pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
3259 -> Result<(), ChannelError> where L::Target: Logger {
3260 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
3261 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
3264 /// Used for failing back with [`msgs::UpdateFailMalformedHTLC`]. For now, this is used when we
3265 /// want to fail blinded HTLCs where we are not the intro node.
3267 /// See [`Self::queue_fail_htlc`] for more info.
3268 pub fn queue_fail_malformed_htlc<L: Deref>(
3269 &mut self, htlc_id_arg: u64, failure_code: u16, sha256_of_onion: [u8; 32], logger: &L
3270 ) -> Result<(), ChannelError> where L::Target: Logger {
3271 self.fail_htlc(htlc_id_arg, (sha256_of_onion, failure_code), true, logger)
3272 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
3275 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
3276 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
3277 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
3278 /// before we fail backwards.
3280 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
3281 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
3282 /// [`ChannelError::Ignore`].
3283 fn fail_htlc<L: Deref, E: FailHTLCContents + Clone>(
3284 &mut self, htlc_id_arg: u64, err_contents: E, mut force_holding_cell: bool,
3286 ) -> Result<Option<E::Message>, ChannelError> where L::Target: Logger {
3287 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3288 panic!("Was asked to fail an HTLC when channel was not in an operational state");
3291 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
3292 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
3293 // these, but for now we just have to treat them as normal.
3295 let mut pending_idx = core::usize::MAX;
3296 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
3297 if htlc.htlc_id == htlc_id_arg {
3299 InboundHTLCState::Committed => {},
3300 InboundHTLCState::LocalRemoved(ref reason) => {
3301 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3303 debug_assert!(false, "Tried to fail an HTLC that was already failed");
3308 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
3309 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
3315 if pending_idx == core::usize::MAX {
3316 #[cfg(any(test, fuzzing))]
3317 // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
3318 // is simply a duplicate fail, not previously failed and we failed-back too early.
3319 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
3323 if !self.context.channel_state.can_generate_new_commitment() {
3324 debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
3325 force_holding_cell = true;
3328 // Now update local state:
3329 if force_holding_cell {
3330 for pending_update in self.context.holding_cell_htlc_updates.iter() {
3331 match pending_update {
3332 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
3333 if htlc_id_arg == htlc_id {
3334 #[cfg(any(test, fuzzing))]
3335 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
3339 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
3340 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
3342 if htlc_id_arg == htlc_id {
3343 debug_assert!(false, "Tried to fail an HTLC that was already failed");
3344 return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
3350 log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
3351 self.context.holding_cell_htlc_updates.push(err_contents.to_htlc_update_awaiting_ack(htlc_id_arg));
3355 log_trace!(logger, "Failing HTLC ID {} back with {} message in channel {}.", htlc_id_arg,
3356 E::Message::name(), &self.context.channel_id());
3358 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
3359 htlc.state = err_contents.clone().to_inbound_htlc_state();
3362 Ok(Some(err_contents.to_message(htlc_id_arg, self.context.channel_id())))
3365 // Message handlers:
3366 /// Updates the state of the channel to indicate that all channels in the batch have received
3367 /// funding_signed and persisted their monitors.
3368 /// The funding transaction is consequently allowed to be broadcast, and the channel can be
3369 /// treated as a non-batch channel going forward.
3370 pub fn set_batch_ready(&mut self) {
3371 self.context.is_batch_funding = None;
3372 self.context.channel_state.clear_waiting_for_batch();
3375 /// Unsets the existing funding information.
3377 /// This must only be used if the channel has not yet completed funding and has not been used.
3379 /// Further, the channel must be immediately shut down after this with a call to
3380 /// [`ChannelContext::force_shutdown`].
3381 pub fn unset_funding_info(&mut self, temporary_channel_id: ChannelId) {
3382 debug_assert!(matches!(
3383 self.context.channel_state, ChannelState::AwaitingChannelReady(_)
3385 self.context.channel_transaction_parameters.funding_outpoint = None;
3386 self.context.channel_id = temporary_channel_id;
3389 /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
3390 /// and the channel is now usable (and public), this may generate an announcement_signatures to
3392 pub fn channel_ready<NS: Deref, L: Deref>(
3393 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
3394 user_config: &UserConfig, best_block: &BestBlock, logger: &L
3395 ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
3397 NS::Target: NodeSigner,
3400 if self.context.channel_state.is_peer_disconnected() {
3401 self.context.workaround_lnd_bug_4006 = Some(msg.clone());
3402 return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
3405 if let Some(scid_alias) = msg.short_channel_id_alias {
3406 if Some(scid_alias) != self.context.short_channel_id {
3407 // The scid alias provided can be used to route payments *from* our counterparty,
3408 // i.e. can be used for inbound payments and provided in invoices, but is not used
3409 // when routing outbound payments.
3410 self.context.latest_inbound_scid_alias = Some(scid_alias);
3414 // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
3415 // batch, but we can receive channel_ready messages.
3416 let mut check_reconnection = false;
3417 match &self.context.channel_state {
3418 ChannelState::AwaitingChannelReady(flags) => {
3419 let flags = flags.clone().clear(FundedStateFlags::ALL.into());
3420 debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
3421 if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
3422 // If we reconnected before sending our `channel_ready` they may still resend theirs.
3423 check_reconnection = true;
3424 } else if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
3425 self.context.channel_state.set_their_channel_ready();
3426 } else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
3427 self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
3428 self.context.update_time_counter += 1;
3430 // We're in `WAITING_FOR_BATCH`, so we should wait until we're ready.
3431 debug_assert!(flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
3434 // If we reconnected before sending our `channel_ready` they may still resend theirs.
3435 ChannelState::ChannelReady(_) => check_reconnection = true,
3436 _ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
3438 if check_reconnection {
3439 // They probably disconnected/reconnected and re-sent the channel_ready, which is
3440 // required, or they're sending a fresh SCID alias.
3441 let expected_point =
3442 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
3443 // If they haven't ever sent an updated point, the point they send should match
3445 self.context.counterparty_cur_commitment_point
3446 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
3447 // If we've advanced the commitment number once, the second commitment point is
3448 // at `counterparty_prev_commitment_point`, which is not yet revoked.
3449 debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
3450 self.context.counterparty_prev_commitment_point
3452 // If they have sent updated points, channel_ready is always supposed to match
3453 // their "first" point, which we re-derive here.
3454 Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
3455 &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
3456 ).expect("We already advanced, so previous secret keys should have been validated already")))
3458 if expected_point != Some(msg.next_per_commitment_point) {
3459 return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
3464 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3465 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3467 log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
3469 Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
3472 pub fn update_add_htlc<F, FE: Deref, L: Deref>(
3473 &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
3474 create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
3475 ) -> Result<(), ChannelError>
3476 where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
3477 FE::Target: FeeEstimator, L::Target: Logger,
3479 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3480 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
3482 // We can't accept HTLCs sent after we've sent a shutdown.
3483 if self.context.channel_state.is_local_shutdown_sent() {
3484 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
3486 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
3487 if self.context.channel_state.is_remote_shutdown_sent() {
3488 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
3490 if self.context.channel_state.is_peer_disconnected() {
3491 return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
3493 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
3494 return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
3496 if msg.amount_msat == 0 {
3497 return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
3499 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
3500 return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
3503 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
3504 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
3505 if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
3506 return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
3508 if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
3509 return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
3512 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
3513 // the reserve_satoshis we told them to always have as direct payment so that they lose
3514 // something if we punish them for broadcasting an old state).
3515 // Note that we don't really care about having a small/no to_remote output in our local
3516 // commitment transactions, as the purpose of the channel reserve is to ensure we can
3517 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
3518 // present in the next commitment transaction we send them (at least for fulfilled ones,
3519 // failed ones won't modify value_to_self).
3520 // Note that we will send HTLCs which another instance of rust-lightning would think
3521 // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
3522 // Channel state once they will not be present in the next received commitment
3524 let mut removed_outbound_total_msat = 0;
3525 for ref htlc in self.context.pending_outbound_htlcs.iter() {
3526 if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
3527 removed_outbound_total_msat += htlc.amount_msat;
3528 } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
3529 removed_outbound_total_msat += htlc.amount_msat;
3533 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
3534 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3537 let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
3538 (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
3539 dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
3541 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
3542 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
3543 let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
3544 if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
3545 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
3546 on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
3547 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3551 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
3552 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
3553 let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
3554 if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
3555 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
3556 on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
3557 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3561 let pending_value_to_self_msat =
3562 self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
3563 let pending_remote_value_msat =
3564 self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
3565 if pending_remote_value_msat < msg.amount_msat {
3566 return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
3569 // Check that the remote can afford to pay for this HTLC on-chain at the current
3570 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
3572 let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
3573 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3574 self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
3576 let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3577 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3581 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
3582 return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
3584 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
3585 return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
3589 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3590 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3594 if !self.context.is_outbound() {
3595 // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
3596 // the spec because the fee spike buffer requirement doesn't exist on the receiver's
3597 // side, only on the sender's. Note that with anchor outputs we are no longer as
3598 // sensitive to fee spikes, so we need not account for them here.
3599 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3600 let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
3601 if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3602 remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
3604 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
3605 // Note that if the pending_forward_status is not updated here, then it's because we're already failing
3606 // the HTLC, i.e. its status is already set to failing.
3607 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
3608 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3611 // Check that they won't violate our local required channel reserve by adding this HTLC.
3612 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3613 let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
3614 if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
3615 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
3618 if self.context.next_counterparty_htlc_id != msg.htlc_id {
3619 return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
3621 if msg.cltv_expiry >= 500000000 {
3622 return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
3625 if self.context.channel_state.is_local_shutdown_sent() {
3626 if let PendingHTLCStatus::Forward(_) = pending_forward_status {
3627 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
3631 // Now update local state:
3632 self.context.next_counterparty_htlc_id += 1;
3633 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
3634 htlc_id: msg.htlc_id,
3635 amount_msat: msg.amount_msat,
3636 payment_hash: msg.payment_hash,
3637 cltv_expiry: msg.cltv_expiry,
3638 state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
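// Illustrative example (hypothetical numbers): with a 1_000_000 sat channel where our side
// (value_to_self plus pending inbound HTLCs) accounts for 300_000_000 msat, the counterparty has
//   pending_remote_value_msat = 1_000_000_000 - 300_000_000 = 700_000_000 msat
// to draw from. If they are the funder and add a 650_000_000 msat HTLC, the remaining
// 50_000_000 msat must still cover their commitment-tx fee for the new non-dust HTLC and our
// holder-selected reserve (say 10_000_000 msat); an add that would dip into either - or, for
// non-anchor channels, into the additional fee spike buffer - is rejected or failed back as above.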
3643 /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
3645 fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
3646 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
3647 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3648 if htlc.htlc_id == htlc_id {
3649 let outcome = match check_preimage {
3650 None => fail_reason.into(),
3651 Some(payment_preimage) => {
3652 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
3653 if payment_hash != htlc.payment_hash {
3654 return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
3656 OutboundHTLCOutcome::Success(Some(payment_preimage))
3660 OutboundHTLCState::LocalAnnounced(_) =>
3661 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
3662 OutboundHTLCState::Committed => {
3663 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
3665 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
3666 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
3671 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
3674 pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64, Option<u64>), ChannelError> {
3675 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3676 return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
3678 if self.context.channel_state.is_peer_disconnected() {
3679 return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
3682 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat, htlc.skimmed_fee_msat))
3685 pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3686 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3687 return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
3689 if self.context.channel_state.is_peer_disconnected() {
3690 return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
3693 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3697 pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3698 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3699 return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
3701 if self.context.channel_state.is_peer_disconnected() {
3702 return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
3705 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3709 pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
3710 where L::Target: Logger
3712 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3713 return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
3715 if self.context.channel_state.is_peer_disconnected() {
3716 return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
3718 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
3719 return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
3722 let funding_script = self.context.get_funding_redeemscript();
3724 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3726 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
3727 let commitment_txid = {
3728 let trusted_tx = commitment_stats.tx.trust();
3729 let bitcoin_tx = trusted_tx.built_transaction();
3730 let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
3732 log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
3733 log_bytes!(msg.signature.serialize_compact()[..]),
3734 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
3735 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
3736 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
3737 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
3741 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
3743 // If our counterparty updated the channel fee in this commitment transaction, check that
3744 // they can actually afford the new fee now.
3745 let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
3746 update_state == FeeUpdateState::RemoteAnnounced
3749 debug_assert!(!self.context.is_outbound());
3750 let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
3751 if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
3752 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
3755 #[cfg(any(test, fuzzing))]
3757 if self.context.is_outbound() {
3758 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
3759 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3760 if let Some(info) = projected_commit_tx_info {
3761 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
3762 + self.context.holding_cell_htlc_updates.len();
3763 if info.total_pending_htlcs == total_pending_htlcs
3764 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
3765 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
3766 && info.feerate == self.context.feerate_per_kw {
3767 assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
3773 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
3774 return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
3777 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
3778 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
3779 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
3780 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
3781 // backwards compatibility, we never use it in production. To provide test coverage, here,
3782 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
3783 #[allow(unused_assignments, unused_mut)]
3784 let mut separate_nondust_htlc_sources = false;
3785 #[cfg(all(feature = "std", any(test, fuzzing)))] {
3786 use core::hash::{BuildHasher, Hasher};
3787 // Get a random value using the only std API to do so - the DefaultHasher
3788 let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
3789 separate_nondust_htlc_sources = rand_val % 2 == 0;
3792 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
3793 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
3794 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
3795 if let Some(_) = htlc.transaction_output_index {
3796 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
3797 self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
3798 &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
3800 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
3801 let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
3802 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
3803 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
3804 log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
3805 encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
3806 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
3807 return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
3809 if !separate_nondust_htlc_sources {
3810 htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
3813 htlcs_and_sigs.push((htlc, None, source_opt.take()));
3815 if separate_nondust_htlc_sources {
3816 if let Some(source) = source_opt.take() {
3817 nondust_htlc_sources.push(source);
3820 debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
3823 let holder_commitment_tx = HolderCommitmentTransaction::new(
3824 commitment_stats.tx,
3826 msg.htlc_signatures.clone(),
3827 &self.context.get_holder_pubkeys().funding_pubkey,
3828 self.context.counterparty_funding_pubkey()
3831 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
3832 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
3834 // Update state now that we've passed all the can-fail calls...
3835 let mut need_commitment = false;
3836 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
3837 if *update_state == FeeUpdateState::RemoteAnnounced {
3838 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
3839 need_commitment = true;
3843 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
3844 let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
3845 Some(forward_info.clone())
3847 if let Some(forward_info) = new_forward {
3848 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
3849 &htlc.payment_hash, &self.context.channel_id);
3850 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
3851 need_commitment = true;
3854 let mut claimed_htlcs = Vec::new();
3855 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3856 if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
3857 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
3858 &htlc.payment_hash, &self.context.channel_id);
3859 // Grab the preimage, if it exists, instead of cloning
3860 let mut reason = OutboundHTLCOutcome::Success(None);
3861 mem::swap(outcome, &mut reason);
3862 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
3863 // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
3864 // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
3865 // have a `Success(None)` reason. In this case we could forget some HTLC
3866 // claims, but such an upgrade is unlikely and including claimed HTLCs here
3867 // fixes a bug which the user was exposed to on 0.0.104 when they started the
3869 claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
3871 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
3872 need_commitment = true;
3876 self.context.latest_monitor_update_id += 1;
3877 let mut monitor_update = ChannelMonitorUpdate {
3878 update_id: self.context.latest_monitor_update_id,
3879 counterparty_node_id: Some(self.context.counterparty_node_id),
3880 updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
3881 commitment_tx: holder_commitment_tx,
3882 htlc_outputs: htlcs_and_sigs,
3884 nondust_htlc_sources,
3886 channel_id: Some(self.context.channel_id()),
3889 self.context.cur_holder_commitment_transaction_number -= 1;
3890 self.context.expecting_peer_commitment_signed = false;
3891 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
3892 // build_commitment_no_status_check() next which will reset this to RAAFirst.
3893 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
3895 if self.context.channel_state.is_monitor_update_in_progress() {
3896 // In case we initially failed monitor updating without requiring a response, we need
3897 // to make sure the RAA gets sent first.
3898 self.context.monitor_pending_revoke_and_ack = true;
3899 if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
3900 // If we were going to send a commitment_signed after the RAA, go ahead and do all
3901 // the corresponding HTLC status updates so that
3902 // get_last_commitment_update_for_send includes the right HTLCs.
3903 self.context.monitor_pending_commitment_signed = true;
3904 let mut additional_update = self.build_commitment_no_status_check(logger);
3905 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3906 // strictly increasing by one, so decrement it here.
3907 self.context.latest_monitor_update_id = monitor_update.update_id;
3908 monitor_update.updates.append(&mut additional_update.updates);
3910 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
3911 &self.context.channel_id);
3912 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3915 let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
3916 // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
3917 // we'll send one right away when we get the revoke_and_ack when we
3918 // free_holding_cell_htlcs().
3919 let mut additional_update = self.build_commitment_no_status_check(logger);
3920 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3921 // strictly increasing by one, so decrement it here.
3922 self.context.latest_monitor_update_id = monitor_update.update_id;
3923 monitor_update.updates.append(&mut additional_update.updates);
3927 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
3928 &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
3929 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
3930 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3933 /// Public version of the below, checking relevant preconditions first.
3934 /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
3935 /// returns `(None, Vec::new())`.
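/// A minimal caller-side sketch (illustrative only; the `channel`, `fee_estimator` and
/// `logger` bindings are assumed to exist in the caller and are not defined here):
/// ```ignore
/// let (monitor_update_opt, failed_htlcs) =
///     channel.maybe_free_holding_cell_htlcs(&fee_estimator, &logger);
/// if let Some(monitor_update) = monitor_update_opt {
///     // Persist the update via chain::Watch before acting on the new channel state.
/// }
/// for (source, payment_hash) in failed_htlcs {
///     // These holding-cell HTLCs could not be forwarded and must be failed backwards.
/// }
/// ```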
3936 pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
3937 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3938 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3939 where F::Target: FeeEstimator, L::Target: Logger
3941 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.channel_state.can_generate_new_commitment() {
3942 self.free_holding_cell_htlcs(fee_estimator, logger)
3943 } else { (None, Vec::new()) }
3946 /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3947 /// for our counterparty.
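/// A rough sketch of the shape of the work done below (illustrative only; the variant
/// names come from `HTLCUpdateAwaitingACK`):
/// ```ignore
/// for update in holding_cell_htlc_updates.drain(..) {
///     match update {
///         AddHTLC { .. } => { /* send_htlc(); on Ignore errors, fail the HTLC back */ }
///         ClaimHTLC { .. } => { /* get_update_fulfill_htlc(); must not fail */ }
///         FailHTLC { .. } | FailMalformedHTLC { .. } => { /* fail_htlc() */ }
///     }
/// }
/// // Any queued update_fee and a fresh commitment are then generated, all folded into a
/// // single ChannelMonitorUpdate.
/// ```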
3948 fn free_holding_cell_htlcs<F: Deref, L: Deref>(
3949 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3950 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3951 where F::Target: FeeEstimator, L::Target: Logger
3953 assert!(!self.context.channel_state.is_monitor_update_in_progress());
3954 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3955 log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3956 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
3958 let mut monitor_update = ChannelMonitorUpdate {
3959 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
3960 counterparty_node_id: Some(self.context.counterparty_node_id),
3961 updates: Vec::new(),
3962 channel_id: Some(self.context.channel_id()),
3965 let mut htlc_updates = Vec::new();
3966 mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3967 let mut update_add_count = 0;
3968 let mut update_fulfill_count = 0;
3969 let mut update_fail_count = 0;
3970 let mut htlcs_to_fail = Vec::new();
3971 for htlc_update in htlc_updates.drain(..) {
3972 // Note that this *can* fail, though it should be due to rather-rare conditions on
3973 // fee races with adding too many outputs which push our total payments just over
3974 // the limit. In case it's less rare than I anticipate, we may want to revisit
3975 // handling this case better and maybe fulfilling some of the HTLCs while attempting
3976 // to rebalance channels.
3977 let fail_htlc_res = match &htlc_update {
3978 &HTLCUpdateAwaitingACK::AddHTLC {
3979 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
3980 skimmed_fee_msat, blinding_point, ..
3982 match self.send_htlc(
3983 amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
3984 false, skimmed_fee_msat, blinding_point, fee_estimator, logger
3986 Ok(_) => update_add_count += 1,
3989 ChannelError::Ignore(ref msg) => {
3990 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
3991 // If we fail to send here, then this HTLC should
3992 // be failed backwards. Failing to send here
3993 // indicates that this HTLC may keep being put back
3994 // into the holding cell without ever being
3995 // successfully forwarded/failed/fulfilled, causing
3996 // our counterparty to eventually close on us.
3997 htlcs_to_fail.push((source.clone(), *payment_hash));
4000 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
4007 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
4008 // If an HTLC claim was previously added to the holding cell (via
4009 // `get_update_fulfill_htlc`), then generating the claim message itself must
4010 // not fail - any in between attempts to claim the HTLC will have resulted
4011 // in it hitting the holding cell again and we cannot change the state of a
4012 // holding cell HTLC from fulfill to anything else.
4013 let mut additional_monitor_update =
4014 if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
4015 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
4016 { monitor_update } else { unreachable!() };
4017 update_fulfill_count += 1;
4018 monitor_update.updates.append(&mut additional_monitor_update.updates);
4021 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
4022 Some(self.fail_htlc(htlc_id, err_packet.clone(), false, logger)
4023 .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
4025 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
4026 Some(self.fail_htlc(htlc_id, (sha256_of_onion, failure_code), false, logger)
4027 .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
4030 if let Some(res) = fail_htlc_res {
4032 Ok(fail_msg_opt) => {
4033 // If an HTLC failure was previously added to the holding cell (via
4034 // `queue_fail_{malformed_}htlc`) then generating the fail message itself must
4035 // not fail - we should never end up in a state where we double-fail
4036 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
4037 // for a full revocation before failing.
4038 debug_assert!(fail_msg_opt.is_some());
4039 update_fail_count += 1;
4041 Err(ChannelError::Ignore(_)) => {},
4043 panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
4048 if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
4049 return (None, htlcs_to_fail);
4051 let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
4052 self.send_update_fee(feerate, false, fee_estimator, logger)
4057 let mut additional_update = self.build_commitment_no_status_check(logger);
4058 // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
4059 // but we want them to be strictly increasing by one, so reset it here.
4060 self.context.latest_monitor_update_id = monitor_update.update_id;
4061 monitor_update.updates.append(&mut additional_update.updates);
4063 log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
4064 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
4065 update_add_count, update_fulfill_count, update_fail_count);
4067 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
4068 (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
4074 /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
4075 /// commitment_signed message here in case we had pending outbound HTLCs to add which were
4076 /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
4077 /// generating an appropriate error *after* the channel state has been updated based on the
4078 /// revoke_and_ack message.
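/// A rough caller-side sketch (illustrative only; `channel`, `msg`, `fee_estimator` and
/// `logger` are assumed bindings and error handling is elided):
/// ```ignore
/// let (htlcs_to_fail, monitor_update_opt) =
///     channel.revoke_and_ack(&msg, &fee_estimator, &logger, false)?;
/// // Fail `htlcs_to_fail` backwards; if `monitor_update_opt` is `Some`, persist it. A
/// // `None` means the update is being held/blocked and will be released later.
/// ```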
4079 pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
4080 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
4081 ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
4082 where F::Target: FeeEstimator, L::Target: Logger,
4084 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4085 return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
4087 if self.context.channel_state.is_peer_disconnected() {
4088 return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
4090 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
4091 return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
4094 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
4096 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
4097 if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
4098 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
4102 if !self.context.channel_state.is_awaiting_remote_revoke() {
4103 // Our counterparty seems to have burned their coins to us (by revoking a state when we
4104 // haven't given them a new commitment transaction to broadcast). We should probably
4105 // take advantage of this by updating our channel monitor, sending them an error, and
4106 // waiting for them to broadcast their latest (now-revoked) claim. But, that would be a
4107 // lot of work, and there's some chance this is all a misunderstanding anyway.
4108 // We have to do *something*, though, since our signer may get mad at us for otherwise
4109 // jumping a remote commitment number, so best to just force-close and move on.
4110 return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
4113 #[cfg(any(test, fuzzing))]
4115 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
4116 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
4119 match &self.context.holder_signer {
4120 ChannelSignerType::Ecdsa(ecdsa) => {
4121 ecdsa.validate_counterparty_revocation(
4122 self.context.cur_counterparty_commitment_transaction_number + 1,
4124 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
4126 // TODO (taproot|arik)
4131 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
4132 .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
4133 self.context.latest_monitor_update_id += 1;
4134 let mut monitor_update = ChannelMonitorUpdate {
4135 update_id: self.context.latest_monitor_update_id,
4136 counterparty_node_id: Some(self.context.counterparty_node_id),
4137 updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
4138 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
4139 secret: msg.per_commitment_secret,
4141 channel_id: Some(self.context.channel_id()),
4144 // Update state now that we've passed all the can-fail calls...
4145 // (note that we may still fail to generate the new commitment_signed message, but that's
4146 // OK, we step the channel here and *then* if the new generation fails we can fail the
4147 // channel based on that, but stepping stuff here should be safe either way.)
4148 self.context.channel_state.clear_awaiting_remote_revoke();
4149 self.context.sent_message_awaiting_response = None;
4150 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
4151 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
4152 self.context.cur_counterparty_commitment_transaction_number -= 1;
4154 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
4155 self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
4158 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
4159 let mut to_forward_infos = Vec::new();
4160 let mut revoked_htlcs = Vec::new();
4161 let mut finalized_claimed_htlcs = Vec::new();
4162 let mut update_fail_htlcs = Vec::new();
4163 let mut update_fail_malformed_htlcs = Vec::new();
4164 let mut require_commitment = false;
4165 let mut value_to_self_msat_diff: i64 = 0;
4168 // Take references explicitly so that we can hold multiple references to self.context.
4169 let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
4170 let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
4171 let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;
4173 // We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
4174 pending_inbound_htlcs.retain(|htlc| {
4175 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
4176 log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
4177 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
4178 value_to_self_msat_diff += htlc.amount_msat as i64;
4180 *expecting_peer_commitment_signed = true;
4184 pending_outbound_htlcs.retain(|htlc| {
4185 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
4186 log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
4187 if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
4188 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
4190 finalized_claimed_htlcs.push(htlc.source.clone());
4191 // They fulfilled, so we sent them money
4192 value_to_self_msat_diff -= htlc.amount_msat as i64;
4197 for htlc in pending_inbound_htlcs.iter_mut() {
4198 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
4200 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
4204 let mut state = InboundHTLCState::Committed;
4205 mem::swap(&mut state, &mut htlc.state);
4207 if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
4208 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
4209 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
4210 require_commitment = true;
4211 } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
4212 match forward_info {
4213 PendingHTLCStatus::Fail(fail_msg) => {
4214 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
4215 require_commitment = true;
4217 HTLCFailureMsg::Relay(msg) => {
4218 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
4219 update_fail_htlcs.push(msg)
4221 HTLCFailureMsg::Malformed(msg) => {
4222 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
4223 update_fail_malformed_htlcs.push(msg)
4227 PendingHTLCStatus::Forward(forward_info) => {
4228 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
4229 to_forward_infos.push((forward_info, htlc.htlc_id));
4230 htlc.state = InboundHTLCState::Committed;
4236 for htlc in pending_outbound_htlcs.iter_mut() {
4237 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
4238 log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
4239 htlc.state = OutboundHTLCState::Committed;
4240 *expecting_peer_commitment_signed = true;
4242 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
4243 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
4244 // Grab the preimage, if it exists, instead of cloning
4245 let mut reason = OutboundHTLCOutcome::Success(None);
4246 mem::swap(outcome, &mut reason);
4247 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
4248 require_commitment = true;
4252 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
4254 if let Some((feerate, update_state)) = self.context.pending_update_fee {
4255 match update_state {
4256 FeeUpdateState::Outbound => {
4257 debug_assert!(self.context.is_outbound());
4258 log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
4259 self.context.feerate_per_kw = feerate;
4260 self.context.pending_update_fee = None;
4261 self.context.expecting_peer_commitment_signed = true;
4263 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
4264 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
4265 debug_assert!(!self.context.is_outbound());
4266 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
4267 require_commitment = true;
4268 self.context.feerate_per_kw = feerate;
4269 self.context.pending_update_fee = None;
4274 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
4275 let release_state_str =
4276 if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
4277 macro_rules! return_with_htlcs_to_fail {
4278 ($htlcs_to_fail: expr) => {
4279 if !release_monitor {
4280 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
4281 update: monitor_update,
4283 return Ok(($htlcs_to_fail, None));
4285 return Ok(($htlcs_to_fail, Some(monitor_update)));
4290 if self.context.channel_state.is_monitor_update_in_progress() {
4291 // We can't actually generate a new commitment transaction (incl by freeing holding
4292 // cells) while we can't update the monitor, so we just return what we have.
4293 if require_commitment {
4294 self.context.monitor_pending_commitment_signed = true;
4295 // When the monitor updating is restored we'll call
4296 // get_last_commitment_update_for_send(), which does not update state, but we're
4297 // definitely now awaiting a remote revoke before we can step forward any more, so
4299 let mut additional_update = self.build_commitment_no_status_check(logger);
4300 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
4301 // strictly increasing by one, so decrement it here.
4302 self.context.latest_monitor_update_id = monitor_update.update_id;
4303 monitor_update.updates.append(&mut additional_update.updates);
4305 self.context.monitor_pending_forwards.append(&mut to_forward_infos);
4306 self.context.monitor_pending_failures.append(&mut revoked_htlcs);
4307 self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
4308 log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
4309 return_with_htlcs_to_fail!(Vec::new());
4312 match self.free_holding_cell_htlcs(fee_estimator, logger) {
4313 (Some(mut additional_update), htlcs_to_fail) => {
4314 // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
4315 // strictly increasing by one, so decrement it here.
4316 self.context.latest_monitor_update_id = monitor_update.update_id;
4317 monitor_update.updates.append(&mut additional_update.updates);
4319 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
4320 &self.context.channel_id(), release_state_str);
4322 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
4323 return_with_htlcs_to_fail!(htlcs_to_fail);
4325 (None, htlcs_to_fail) => {
4326 if require_commitment {
4327 let mut additional_update = self.build_commitment_no_status_check(logger);
4329 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
4330 // strictly increasing by one, so decrement it here.
4331 self.context.latest_monitor_update_id = monitor_update.update_id;
4332 monitor_update.updates.append(&mut additional_update.updates);
4334 log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
4335 &self.context.channel_id(),
4336 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
4339 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
4340 return_with_htlcs_to_fail!(htlcs_to_fail);
4342 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
4343 &self.context.channel_id(), release_state_str);
4345 self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
4346 return_with_htlcs_to_fail!(htlcs_to_fail);
4352 /// Queues up an outbound update fee by placing it in the holding cell. You should call
4353 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
4354 /// commitment update.
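/// A minimal sketch of the intended call pattern (illustrative only; `channel`,
/// `new_feerate_per_kw`, `fee_estimator` and `logger` are assumed bindings):
/// ```ignore
/// // Queue the feerate change; nothing is sent to the peer yet.
/// channel.queue_update_fee(new_feerate_per_kw, &fee_estimator, &logger);
/// // Later, generate the actual update_fee + commitment_signed messages.
/// let (monitor_update_opt, failed_htlcs) =
///     channel.maybe_free_holding_cell_htlcs(&fee_estimator, &logger);
/// ```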
4355 pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
4356 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4357 where F::Target: FeeEstimator, L::Target: Logger
4359 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
4360 assert!(msg_opt.is_none(), "We forced holding cell?");
4363 /// Adds a pending update to this channel. See the doc for send_htlc for
4364 /// further details on when the return value will be `None`.
4365 /// If our balance is too low to cover the cost of the next commitment transaction at the
4366 /// new feerate, the update is cancelled.
4368 /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
4369 /// [`Channel`] if `force_holding_cell` is false.
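/// A rough sketch of the affordability check performed below (illustrative only; the
/// names `base_weight`, `fee_buffer_htlcs`, etc. are stand-ins and the real computation
/// lives in `commit_tx_fee_sat`):
/// ```ignore
/// // For a non-anchor channel the commitment weight is roughly
/// //   base_weight + 172 * num_nondust_htlcs
/// // so the buffered fee we must afford at the new feerate is approximately:
/// let buffer_fee_sat = feerate_per_kw as u64
///     * (base_weight + 172 * (num_nondust_htlcs + holding_cell_htlcs + fee_buffer_htlcs))
///     / 1000;
/// // The update is dropped (this returns `None`) unless our balance covers both this
/// // buffer and the reserve the counterparty selected for us:
/// let can_afford = holder_balance_msat
///     >= buffer_fee_sat * 1000 + counterparty_selected_reserve_sat * 1000;
/// ```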
4370 fn send_update_fee<F: Deref, L: Deref>(
4371 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
4372 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
4373 ) -> Option<msgs::UpdateFee>
4374 where F::Target: FeeEstimator, L::Target: Logger
4376 if !self.context.is_outbound() {
4377 panic!("Cannot send fee from inbound channel");
4379 if !self.context.is_usable() {
4380 panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
4382 if !self.context.is_live() {
4383 panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
4386 // Before proposing a feerate update, check that we can actually afford the new fee.
4387 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
4388 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
4389 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
4390 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
4391 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
4392 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
4393 if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
4394 //TODO: auto-close after a number of failures?
4395 log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
4399 // Note that we evaluate the pending HTLCs' "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
4400 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4401 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4402 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4403 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4404 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
4407 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4408 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
4412 if self.context.channel_state.is_awaiting_remote_revoke() || self.context.channel_state.is_monitor_update_in_progress() {
4413 force_holding_cell = true;
4416 if force_holding_cell {
4417 self.context.holding_cell_update_fee = Some(feerate_per_kw);
4421 debug_assert!(self.context.pending_update_fee.is_none());
4422 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
4424 Some(msgs::UpdateFee {
4425 channel_id: self.context.channel_id,
4430 /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
4431 /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
4433 /// No further message handling calls may be made until a channel_reestablish dance has
4435 /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
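/// A minimal sketch of the disconnect-time call pattern (illustrative only; `channel` and
/// `logger` are assumed bindings):
/// ```ignore
/// // On peer disconnection, drop uncommitted updates and pause message handling.
/// if channel.remove_uncommitted_htlcs_and_mark_paused(&logger).is_err() {
///     // The channel never made it past the pre-funding stage; force-shutdown and
///     // forget about it.
/// }
/// // No update_*/commitment_signed/revoke_and_ack handling is valid again until the
/// // channel_reestablish exchange completes on reconnect.
/// ```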
4436 pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
4437 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
4438 if self.context.channel_state.is_pre_funded_state() {
4442 if self.context.channel_state.is_peer_disconnected() {
4443 // While the below code should be idempotent, it's simpler to just return early, as
4444 // redundant disconnect events can fire, though they should be rare.
4448 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
4449 self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
4452 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
4453 // will be retransmitted.
4454 self.context.last_sent_closing_fee = None;
4455 self.context.pending_counterparty_closing_signed = None;
4456 self.context.closing_fee_limits = None;
4458 let mut inbound_drop_count = 0;
4459 self.context.pending_inbound_htlcs.retain(|htlc| {
4461 InboundHTLCState::RemoteAnnounced(_) => {
4462 // They sent us an update_add_htlc but we never got the commitment_signed.
4463 // We'll tell them what commitment_signed we're expecting next and they'll drop
4464 // this HTLC accordingly
4465 inbound_drop_count += 1;
4468 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
4469 // We received a commitment_signed updating this HTLC and (at least hopefully)
4470 // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
4471 // in response to it yet, so don't touch it.
4474 InboundHTLCState::Committed => true,
4475 InboundHTLCState::LocalRemoved(_) => {
4476 // We (hopefully) sent a commitment_signed updating this HTLC (which we can
4477 // re-transmit if needed) and they may have even sent a revoke_and_ack back
4478 // (that we missed). Keep this around for now and if they tell us they missed
4479 // the commitment_signed we can re-transmit the update then.
4484 self.context.next_counterparty_htlc_id -= inbound_drop_count;
4486 if let Some((_, update_state)) = self.context.pending_update_fee {
4487 if update_state == FeeUpdateState::RemoteAnnounced {
4488 debug_assert!(!self.context.is_outbound());
4489 self.context.pending_update_fee = None;
4493 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
4494 if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
4495 // They sent us an update to remove this but haven't yet sent the corresponding
4496 // commitment_signed, we need to move it back to Committed and they can re-send
4497 // the update upon reconnection.
4498 htlc.state = OutboundHTLCState::Committed;
4502 self.context.sent_message_awaiting_response = None;
4504 self.context.channel_state.set_peer_disconnected();
4505 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
4509 /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
4510 /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
4511 /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
4512 /// update completes (potentially immediately).
4513 /// The messages which were generated with the monitor update must *not* have been sent to the
4514 /// remote end, and must instead have been dropped. They will be regenerated when
4515 /// [`Self::monitor_updating_restored`] is called.
4517 /// [`ChannelManager`]: super::channelmanager::ChannelManager
4518 /// [`chain::Watch`]: crate::chain::Watch
4519 /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
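/// A rough sketch of how this pairs with [`Self::monitor_updating_restored`]
/// (illustrative only; the surrounding message generation is elided):
/// ```ignore
/// // Record what must be (re)sent once the monitor write completes, and mark the
/// // update as in progress...
/// self.monitor_updating_paused(resend_raa, resend_commitment, resend_channel_ready,
///     pending_forwards, pending_fails, pending_finalized_claimed_htlcs);
/// // ...then, only after the ChannelManager learns the persist succeeded, it calls
/// // monitor_updating_restored() to obtain the messages to actually send.
/// ```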
4520 fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
4521 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
4522 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
4523 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
4525 self.context.monitor_pending_revoke_and_ack |= resend_raa;
4526 self.context.monitor_pending_commitment_signed |= resend_commitment;
4527 self.context.monitor_pending_channel_ready |= resend_channel_ready;
4528 self.context.monitor_pending_forwards.append(&mut pending_forwards);
4529 self.context.monitor_pending_failures.append(&mut pending_fails);
4530 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
4531 self.context.channel_state.set_monitor_update_in_progress();
4534 /// Indicates that the latest ChannelMonitor update has been committed by the client
4535 /// successfully and we should restore normal operation. Returns messages which should be sent
4536 /// to the remote side.
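/// A minimal caller-side sketch (illustrative only; `channel`, `node_signer`, `chain_hash`,
/// `user_config`, `best_block_height` and `logger` are assumed bindings):
/// ```ignore
/// let updates = channel.monitor_updating_restored(
///     &logger, &node_signer, chain_hash, &user_config, best_block_height);
/// // Broadcast `updates.funding_broadcastable` if set, send any channel_ready and
/// // announcement_sigs, then the RAA and commitment_update in `updates.order`.
/// ```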
4537 pub fn monitor_updating_restored<L: Deref, NS: Deref>(
4538 &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
4539 user_config: &UserConfig, best_block_height: u32
4540 ) -> MonitorRestoreUpdates
4543 NS::Target: NodeSigner
4545 assert!(self.context.channel_state.is_monitor_update_in_progress());
4546 self.context.channel_state.clear_monitor_update_in_progress();
4548 // If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to
4549 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
4550 // first received the funding_signed.
4551 let mut funding_broadcastable =
4552 if self.context.is_outbound() &&
4553 (matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
4554 matches!(self.context.channel_state, ChannelState::ChannelReady(_)))
4556 self.context.funding_transaction.take()
4558 // That said, if the funding transaction is already confirmed (ie we're active with a
4559 // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
4560 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.minimum_depth != Some(0) {
4561 funding_broadcastable = None;
4564 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
4565 // (and we assume the user never directly broadcasts the funding transaction and waits for
4566 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
4567 // * an inbound channel that failed to persist the monitor on funding_created and we got
4568 // the funding transaction confirmed before the monitor was persisted, or
4569 // * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
4570 let channel_ready = if self.context.monitor_pending_channel_ready {
4571 assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
4572 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
4573 self.context.monitor_pending_channel_ready = false;
4574 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4575 Some(msgs::ChannelReady {
4576 channel_id: self.context.channel_id(),
4577 next_per_commitment_point,
4578 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4582 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
4584 let mut accepted_htlcs = Vec::new();
4585 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
4586 let mut failed_htlcs = Vec::new();
4587 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
4588 let mut finalized_claimed_htlcs = Vec::new();
4589 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
4591 if self.context.channel_state.is_peer_disconnected() {
4592 self.context.monitor_pending_revoke_and_ack = false;
4593 self.context.monitor_pending_commitment_signed = false;
4594 return MonitorRestoreUpdates {
4595 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
4596 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4600 let raa = if self.context.monitor_pending_revoke_and_ack {
4601 Some(self.get_last_revoke_and_ack())
4603 let commitment_update = if self.context.monitor_pending_commitment_signed {
4604 self.get_last_commitment_update_for_send(logger).ok()
4606 if commitment_update.is_some() {
4607 self.mark_awaiting_response();
4610 self.context.monitor_pending_revoke_and_ack = false;
4611 self.context.monitor_pending_commitment_signed = false;
4612 let order = self.context.resend_order.clone();
4613 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
4614 &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
4615 if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
4616 match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
4617 MonitorRestoreUpdates {
4618 raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4622 pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
4623 where F::Target: FeeEstimator, L::Target: Logger
4625 if self.context.is_outbound() {
4626 return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
4628 if self.context.channel_state.is_peer_disconnected() {
4629 return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
4631 Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
4633 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
4634 self.context.update_time_counter += 1;
4635 // Check that we won't be pushed over our dust exposure limit by the feerate increase.
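// (Dust exposure here is the total msat value of HTLCs that would be trimmed to dust, and
// thus unclaimable on-chain, on our or our counterparty's commitment transaction.)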
4636 if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
4637 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
4638 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
4639 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4640 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4641 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4642 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4643 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
4644 msg.feerate_per_kw, holder_tx_dust_exposure)));
4646 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4647 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
4648 msg.feerate_per_kw, counterparty_tx_dust_exposure)));
4654 /// Indicates that the signer may have some signatures for us, so we should retry if we're
4656 #[cfg(async_signing)]
4657 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
4658 let commitment_update = if self.context.signer_pending_commitment_update {
4659 self.get_last_commitment_update_for_send(logger).ok()
4661 let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
4662 self.context.get_funding_signed_msg(logger).1
4664 let channel_ready = if funding_signed.is_some() {
4665 self.check_get_channel_ready(0)
4668 log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
4669 if commitment_update.is_some() { "a" } else { "no" },
4670 if funding_signed.is_some() { "a" } else { "no" },
4671 if channel_ready.is_some() { "a" } else { "no" });
4673 SignerResumeUpdates {
4680 fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
4681 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4682 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
4683 msgs::RevokeAndACK {
4684 channel_id: self.context.channel_id,
4685 per_commitment_secret,
4686 next_per_commitment_point,
4688 next_local_nonce: None,
4692 /// Gets the last commitment update for immediate sending to our peer.
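/// A rough sketch of the regenerated update (illustrative only; the exact contents are
/// derived from the pending HTLC states below):
/// ```ignore
/// msgs::CommitmentUpdate {
///     update_add_htlcs,            // outbound HTLCs still in LocalAnnounced
///     update_fulfill_htlcs,        // inbound HTLCs we removed with a preimage
///     update_fail_htlcs,           // inbound HTLCs we failed back
///     update_fail_malformed_htlcs, // inbound HTLCs we failed as malformed
///     update_fee,                  // only if we're the funder with a pending fee update
///     commitment_signed,           // freshly (re-)signed commitment
/// }
/// ```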
4693 fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
4694 let mut update_add_htlcs = Vec::new();
4695 let mut update_fulfill_htlcs = Vec::new();
4696 let mut update_fail_htlcs = Vec::new();
4697 let mut update_fail_malformed_htlcs = Vec::new();
4699 for htlc in self.context.pending_outbound_htlcs.iter() {
4700 if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
4701 update_add_htlcs.push(msgs::UpdateAddHTLC {
4702 channel_id: self.context.channel_id(),
4703 htlc_id: htlc.htlc_id,
4704 amount_msat: htlc.amount_msat,
4705 payment_hash: htlc.payment_hash,
4706 cltv_expiry: htlc.cltv_expiry,
4707 onion_routing_packet: (**onion_packet).clone(),
4708 skimmed_fee_msat: htlc.skimmed_fee_msat,
4709 blinding_point: htlc.blinding_point,
4714 for htlc in self.context.pending_inbound_htlcs.iter() {
4715 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
4717 &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
4718 update_fail_htlcs.push(msgs::UpdateFailHTLC {
4719 channel_id: self.context.channel_id(),
4720 htlc_id: htlc.htlc_id,
4721 reason: err_packet.clone()
4724 &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
4725 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
4726 channel_id: self.context.channel_id(),
4727 htlc_id: htlc.htlc_id,
4728 sha256_of_onion: sha256_of_onion.clone(),
4729 failure_code: failure_code.clone(),
4732 &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
4733 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
4734 channel_id: self.context.channel_id(),
4735 htlc_id: htlc.htlc_id,
4736 payment_preimage: payment_preimage.clone(),
4743 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
4744 Some(msgs::UpdateFee {
4745 channel_id: self.context.channel_id(),
4746 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
4750 log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
4751 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
4752 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
4753 let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
4754 if self.context.signer_pending_commitment_update {
4755 log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
4756 self.context.signer_pending_commitment_update = false;
4760 #[cfg(not(async_signing))] {
4761 panic!("Failed to get signature for new commitment state");
4763 #[cfg(async_signing)] {
4764 if !self.context.signer_pending_commitment_update {
4765 log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
4766 self.context.signer_pending_commitment_update = true;
4771 Ok(msgs::CommitmentUpdate {
4772 update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
4777 /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
4778 pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
4779 if self.context.channel_state.is_local_shutdown_sent() {
4780 assert!(self.context.shutdown_scriptpubkey.is_some());
4781 Some(msgs::Shutdown {
4782 channel_id: self.context.channel_id,
4783 scriptpubkey: self.get_closing_scriptpubkey(),
4788 /// May panic if some calls other than message-handling calls (which will all Err immediately)
4789 /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
4791 /// Some links printed in log lines are included here to check them during build (when run with
4792 /// `cargo doc --document-private-items`):
4793 /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
4794 /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
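/// A rough caller-side sketch (illustrative only; `channel`, `msg`, `node_signer`,
/// `chain_hash`, `user_config`, `best_block` and `logger` are assumed bindings):
/// ```ignore
/// let responses = channel.channel_reestablish(
///     &msg, &logger, &node_signer, chain_hash, &user_config, &best_block)?;
/// // Send any shutdown_msg/channel_ready/announcement_sigs first, then the RAA and
/// // commitment_update in the order given by `responses.order`.
/// ```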
4795 pub fn channel_reestablish<L: Deref, NS: Deref>(
4796 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
4797 chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
4798 ) -> Result<ReestablishResponses, ChannelError>
4801 NS::Target: NodeSigner
4803 if !self.context.channel_state.is_peer_disconnected() {
4804 // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
4805 // almost certainly indicates we are going to end up out-of-sync in some way, so we
4806 // just close here instead of trying to recover.
4807 return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
4810 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
4811 msg.next_local_commitment_number == 0 {
4812 return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
4815 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
4816 if msg.next_remote_commitment_number > 0 {
4817 let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
4818 let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
4819 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
4820 if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
4821 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
4823 if msg.next_remote_commitment_number > our_commitment_transaction {
4824 macro_rules! log_and_panic {
4825 ($err_msg: expr) => {
4826 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4827 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4830 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
4831 This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
4832 More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
4833 If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
4834 ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
4835 ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
4836 Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
4837 See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
4841 // Before we change the state of the channel, we check if the peer is sending a very old
4842 // commitment transaction number; if so, we send a warning message.
4843 if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
4844 return Err(ChannelError::Warn(format!(
4845 "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
4846 msg.next_remote_commitment_number,
4847 our_commitment_transaction
4851 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
4852 // remaining cases either succeed or ErrorMessage-fail).
4853 self.context.channel_state.clear_peer_disconnected();
4854 self.context.sent_message_awaiting_response = None;
4856 let shutdown_msg = self.get_outbound_shutdown();
4858 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
4860 if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
4861 // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
4862 if !self.context.channel_state.is_our_channel_ready() ||
4863 self.context.channel_state.is_monitor_update_in_progress() {
4864 if msg.next_remote_commitment_number != 0 {
4865 return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
4867 // Short circuit the whole handler as there is nothing we can resend them
4868 return Ok(ReestablishResponses {
4869 channel_ready: None,
4870 raa: None, commitment_update: None,
4871 order: RAACommitmentOrder::CommitmentFirst,
4872 shutdown_msg, announcement_sigs,
4876 // We have OurChannelReady set!
4877 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4878 return Ok(ReestablishResponses {
4879 channel_ready: Some(msgs::ChannelReady {
4880 channel_id: self.context.channel_id(),
4881 next_per_commitment_point,
4882 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4884 raa: None, commitment_update: None,
4885 order: RAACommitmentOrder::CommitmentFirst,
4886 shutdown_msg, announcement_sigs,
4890 let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
4891 // Remote isn't waiting on any RevokeAndACK from us!
4892 // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
4894 } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
4895 if self.context.channel_state.is_monitor_update_in_progress() {
4896 self.context.monitor_pending_revoke_and_ack = true;
4899 Some(self.get_last_revoke_and_ack())
4902 debug_assert!(false, "All values should have been handled in the four cases above");
4903 return Err(ChannelError::Close(format!(
4904 "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
4905 msg.next_remote_commitment_number,
4906 our_commitment_transaction
4910 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
4911 // revoke_and_ack, not on sending commitment_signed, so we add one if we have
4912 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
4913 // the corresponding revoke_and_ack back yet.
4914 let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke();
4915 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
4916 self.mark_awaiting_response();
4918 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
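// As with our_commitment_transaction above, this converts LDK's downward-counting counterparty
// commitment counter into the upward-counting next_local_commitment_number the peer reports in
// channel_reestablish.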
4920 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
4921 // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
4922 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4923 Some(msgs::ChannelReady {
4924 channel_id: self.context.channel_id(),
4925 next_per_commitment_point,
4926 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4930 if msg.next_local_commitment_number == next_counterparty_commitment_number {
4931 if required_revoke.is_some() {
4932 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
4934 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
4937 Ok(ReestablishResponses {
4938 channel_ready, shutdown_msg, announcement_sigs,
4939 raa: required_revoke,
4940 commitment_update: None,
4941 order: self.context.resend_order.clone(),
4943 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
4944 if required_revoke.is_some() {
4945 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
4947 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
4950 if self.context.channel_state.is_monitor_update_in_progress() {
4951 self.context.monitor_pending_commitment_signed = true;
4952 Ok(ReestablishResponses {
4953 channel_ready, shutdown_msg, announcement_sigs,
4954 commitment_update: None, raa: None,
4955 order: self.context.resend_order.clone(),
4958 Ok(ReestablishResponses {
4959 channel_ready, shutdown_msg, announcement_sigs,
4960 raa: required_revoke,
4961 commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
4962 order: self.context.resend_order.clone(),
4965 } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
4966 Err(ChannelError::Close(format!(
4967 "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
4968 msg.next_local_commitment_number,
4969 next_counterparty_commitment_number,
4972 Err(ChannelError::Close(format!(
4973 "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
4974 msg.next_local_commitment_number,
4975 next_counterparty_commitment_number,
4980 /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
4981 /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
4982 /// at which point they will be recalculated.
4983 fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
4985 where F::Target: FeeEstimator
4987 if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
4989 // Propose a range from our current Background feerate to our Normal feerate plus our
4990 // force_close_avoidance_max_fee_satoshis.
4991 // If we fail to come to consensus, we'll have to force-close.
4992 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
4993 // Use NonAnchorChannelFee because this should be an estimate for a channel close
4994 // that we don't expect to need fee bumping
4995 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
4996 let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
4998 // The spec requires that (when the channel does not have anchors) we only send absolute
4999 // channel fees no greater than the absolute channel fee on the current commitment
5000 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
5001 // a very good reason to apply such a limit in any case. We don't bother doing so, risking
5002 // some force-closure by old nodes, but we wanted to close the channel anyway.
5004 if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
5005 let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
5006 proposed_feerate = cmp::max(proposed_feerate, min_feerate);
5007 proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
5010 // Note that technically we could end up with a lower minimum fee if one side's balance is
5011 // below our dust limit, causing the output to disappear. We don't bother handling this
5012 // case, however, as this should only happen if a channel is closed before any (material)
5013 // payments have been made on it. This may cause slight fee overpayment and/or failure to
5014 // come to consensus with our counterparty on appropriate fees, however it should be a
5015 // relatively rare case. We can revisit this later, though note that in order to determine
5016 // if the funder's output is dust we have to know the absolute fee we're going to use.
5017 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
5018 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
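// Feerates here are in sats per 1000 weight units, so e.g. a 500 sat/kWU proposal on a
// 700-weight closing transaction works out to 500 * 700 / 1000 = 350 sats of fee.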
5019 let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
5020 // We always add force_close_avoidance_max_fee_satoshis to our normal
5021 // feerate-calculated fee, but allow the max to be overridden if we're using a
5022 // target feerate-calculated fee.
5023 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
5024 proposed_max_feerate as u64 * tx_weight / 1000)
5026 self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
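// If we're not the funder, the counterparty pays the closing fee, so the only cap is their
// entire balance: the channel value minus our balance, rounded up to a whole satoshi.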
5029 self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
5030 self.context.closing_fee_limits.clone().unwrap()
5033 /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
5034 /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
5035 /// this point if we're the funder we should send the initial closing_signed, and in any case
5036 /// shutdown should complete within a reasonable timeframe.
5037 fn closing_negotiation_ready(&self) -> bool {
5038 self.context.closing_negotiation_ready()
5041 /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
5042 /// an Err if no progress is being made and the channel should be force-closed instead.
5043 /// Should be called on a one-minute timer.
5044 pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
5045 if self.closing_negotiation_ready() {
5046 if self.context.closing_signed_in_flight {
5047 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
5049 self.context.closing_signed_in_flight = true;
5055 pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
5056 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
5057 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
5058 where F::Target: FeeEstimator, L::Target: Logger
5060 // If we're waiting on a monitor persistence, that implies we're also waiting to send some
5061 // message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
5062 // initiate `closing_signed` negotiation until we're clear of all pending messages. Note
5063 // that closing_negotiation_ready checks this case (as well as a few others).
5064 if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
5065 return Ok((None, None, None));
5068 if !self.context.is_outbound() {
5069 if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
5070 return self.closing_signed(fee_estimator, &msg);
5072 return Ok((None, None, None));
5075 // If we're waiting on a counterparty `commitment_signed` to clear some updates from our
5076 // local commitment transaction, we can't yet initiate `closing_signed` negotiation.
5077 if self.context.expecting_peer_commitment_signed {
5078 return Ok((None, None, None));
5081 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
5083 assert!(self.context.shutdown_scriptpubkey.is_some());
5084 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
5085 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
5086 our_min_fee, our_max_fee, total_fee_satoshis);
5088 match &self.context.holder_signer {
5089 ChannelSignerType::Ecdsa(ecdsa) => {
5091 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
5092 .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
5094 self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
5095 Ok((Some(msgs::ClosingSigned {
5096 channel_id: self.context.channel_id,
5097 fee_satoshis: total_fee_satoshis,
5099 fee_range: Some(msgs::ClosingSignedFeeRange {
5100 min_fee_satoshis: our_min_fee,
5101 max_fee_satoshis: our_max_fee,
5105 // TODO (taproot|arik)
5111 // Marks a channel as waiting for a response from the counterparty. If it's not received
5112 // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt a reconnection.
5114 fn mark_awaiting_response(&mut self) {
5115 self.context.sent_message_awaiting_response = Some(0);
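// Zero timer ticks have elapsed so far; should_disconnect_peer_awaiting_response below bumps
// this once per tick until it reaches DISCONNECT_PEER_AWAITING_RESPONSE_TICKS.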
5118 /// Determines whether we should disconnect the counterparty due to not receiving a response
5119 /// within our expected timeframe.
5121 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
5122 pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
5123 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
5126 // Don't disconnect when we're not waiting on a response.
5129 *ticks_elapsed += 1;
5130 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
5134 &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
5135 ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
5137 if self.context.channel_state.is_peer_disconnected() {
5138 return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
5140 if self.context.channel_state.is_pre_funded_state() {
5141 // Spec says we should fail the connection, not the channel, but that's nonsense, there
5142 // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
5143 // can do that via error message without getting a connection fail anyway...
5144 return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
5146 for htlc in self.context.pending_inbound_htlcs.iter() {
5147 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
5148 return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
5151 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
5153 if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
5154 return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
5157 if self.context.counterparty_shutdown_scriptpubkey.is_some() {
5158 if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
5159 return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
5162 self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
5165 // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
5166 // immediately after the commitment dance, but we can send a Shutdown because we won't send
5167 // any further commitment updates after we set LocalShutdownSent.
5168 let send_shutdown = !self.context.channel_state.is_local_shutdown_sent();
5170 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
5173 assert!(send_shutdown);
5174 let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
5175 Ok(scriptpubkey) => scriptpubkey,
5176 Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
5178 if !shutdown_scriptpubkey.is_compatible(their_features) {
5179 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
5181 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
5186 // From here on out, we may not fail!
5188 self.context.channel_state.set_remote_shutdown_sent();
5189 self.context.update_time_counter += 1;
5191 let monitor_update = if update_shutdown_script {
5192 self.context.latest_monitor_update_id += 1;
5193 let monitor_update = ChannelMonitorUpdate {
5194 update_id: self.context.latest_monitor_update_id,
5195 counterparty_node_id: Some(self.context.counterparty_node_id),
5196 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
5197 scriptpubkey: self.get_closing_scriptpubkey(),
5199 channel_id: Some(self.context.channel_id()),
5201 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
5202 self.push_ret_blockable_mon_update(monitor_update)
5204 let shutdown = if send_shutdown {
5205 Some(msgs::Shutdown {
5206 channel_id: self.context.channel_id,
5207 scriptpubkey: self.get_closing_scriptpubkey(),
5211 // We can't send our shutdown until we've committed all of our pending HTLCs, but the
5212 // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
5213 // cell HTLCs and return them to fail the payment.
5214 self.context.holding_cell_update_fee = None;
5215 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
5216 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5218 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
5219 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
5226 self.context.channel_state.set_local_shutdown_sent();
5227 self.context.update_time_counter += 1;
5229 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
5232 fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
5233 let mut tx = closing_tx.trust().built_transaction().clone();
5235 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
5237 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
5238 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
5239 let mut holder_sig = sig.serialize_der().to_vec();
5240 holder_sig.push(EcdsaSighashType::All as u8);
5241 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
5242 cp_sig.push(EcdsaSighashType::All as u8);
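// Per BOLT 3, the two signatures in the witness must appear in the same order as the funding
// pubkeys in the 2-of-2 multisig script, i.e. lexicographic order of the serialized keys.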
5243 if funding_key[..] < counterparty_funding_key[..] {
5244 tx.input[0].witness.push(holder_sig);
5245 tx.input[0].witness.push(cp_sig);
5247 tx.input[0].witness.push(cp_sig);
5248 tx.input[0].witness.push(holder_sig);
5251 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
5255 pub fn closing_signed<F: Deref>(
5256 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
5257 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
5258 where F::Target: FeeEstimator
5260 if !self.context.channel_state.is_both_sides_shutdown() {
5261 return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
5263 if self.context.channel_state.is_peer_disconnected() {
5264 return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
5266 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
5267 return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
5269 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
5270 return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
5273 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
5274 return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
5277 if self.context.channel_state.is_monitor_update_in_progress() {
5278 self.context.pending_counterparty_closing_signed = Some(msg.clone());
5279 return Ok((None, None, None));
5282 let funding_redeemscript = self.context.get_funding_redeemscript();
5283 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
5284 if used_total_fee != msg.fee_satoshis {
5285 return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
5287 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
5289 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
5292 // The remote end may have decided to revoke their output due to inconsistent dust
5293 // limits, so check for that case by re-checking the signature here.
5294 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
5295 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
5296 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
5300 for outp in closing_tx.trust().built_transaction().output.iter() {
5301 if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
5302 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
5306 let closure_reason = if self.initiated_shutdown() {
5307 ClosureReason::LocallyInitiatedCooperativeClosure
5309 ClosureReason::CounterpartyInitiatedCooperativeClosure
5312 assert!(self.context.shutdown_scriptpubkey.is_some());
5313 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
5314 if last_fee == msg.fee_satoshis {
5315 let shutdown_result = ShutdownResult {
5317 monitor_update: None,
5318 dropped_outbound_htlcs: Vec::new(),
5319 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
5320 channel_id: self.context.channel_id,
5321 user_channel_id: self.context.user_id,
5322 channel_capacity_satoshis: self.context.channel_value_satoshis,
5323 counterparty_node_id: self.context.counterparty_node_id,
5324 unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
5325 channel_funding_txo: self.context.get_funding_txo(),
5327 let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
5328 self.context.channel_state = ChannelState::ShutdownComplete;
5329 self.context.update_time_counter += 1;
5330 return Ok((None, Some(tx), Some(shutdown_result)));
5334 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
5336 macro_rules! propose_fee {
5337 ($new_fee: expr) => {
5338 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
5339 (closing_tx, $new_fee)
5341 self.build_closing_transaction($new_fee, false)
5344 return match &self.context.holder_signer {
5345 ChannelSignerType::Ecdsa(ecdsa) => {
5347 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
5348 .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
5349 let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
5350 let shutdown_result = ShutdownResult {
5352 monitor_update: None,
5353 dropped_outbound_htlcs: Vec::new(),
5354 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
5355 channel_id: self.context.channel_id,
5356 user_channel_id: self.context.user_id,
5357 channel_capacity_satoshis: self.context.channel_value_satoshis,
5358 counterparty_node_id: self.context.counterparty_node_id,
5359 unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
5360 channel_funding_txo: self.context.get_funding_txo(),
5362 self.context.channel_state = ChannelState::ShutdownComplete;
5363 self.context.update_time_counter += 1;
5364 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
5365 (Some(tx), Some(shutdown_result))
5370 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
5371 Ok((Some(msgs::ClosingSigned {
5372 channel_id: self.context.channel_id,
5373 fee_satoshis: used_fee,
5375 fee_range: Some(msgs::ClosingSignedFeeRange {
5376 min_fee_satoshis: our_min_fee,
5377 max_fee_satoshis: our_max_fee,
5379 }), signed_tx, shutdown_result))
5381 // TODO (taproot|arik)
5388 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
5389 if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
5390 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
5392 if max_fee_satoshis < our_min_fee {
5393 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
5395 if min_fee_satoshis > our_max_fee {
5396 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
5399 if !self.context.is_outbound() {
5400 // They have to pay, so pick the highest fee in the overlapping range.
5401 // We should never set an upper bound aside from their full balance
5402 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
5403 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
5405 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
5406 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
5407 msg.fee_satoshis, our_min_fee, our_max_fee)));
5409 // The proposed fee is in our acceptable range, accept it and broadcast!
5410 propose_fee!(msg.fee_satoshis);
5413 // Old fee style negotiation. We don't bother to enforce whether they are complying
5414 // with the "making progress" requirements, we just comply and hope for the best.
5415 if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
5416 if msg.fee_satoshis > last_fee {
5417 if msg.fee_satoshis < our_max_fee {
5418 propose_fee!(msg.fee_satoshis);
5419 } else if last_fee < our_max_fee {
5420 propose_fee!(our_max_fee);
5422 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
5425 if msg.fee_satoshis > our_min_fee {
5426 propose_fee!(msg.fee_satoshis);
5427 } else if last_fee > our_min_fee {
5428 propose_fee!(our_min_fee);
5430 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
5434 if msg.fee_satoshis < our_min_fee {
5435 propose_fee!(our_min_fee);
5436 } else if msg.fee_satoshis > our_max_fee {
5437 propose_fee!(our_max_fee);
5439 propose_fee!(msg.fee_satoshis);
5445 fn internal_htlc_satisfies_config(
5446 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
5447 ) -> Result<(), (&'static str, u16)> {
5448 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
5449 .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
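// e.g. with forwarding_fee_base_msat = 1000 and forwarding_fee_proportional_millionths = 100,
// forwarding 1_000_000 msat requires the incoming HTLC to carry at least
// 1_000_000 + 1000 + 100 = 1_001_100 msat.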
5450 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
5451 (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
5453 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
5454 0x1000 | 12, // fee_insufficient
5457 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
5459 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
5460 0x1000 | 13, // incorrect_cltv_expiry
5466 /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
5467 /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
5468 /// unsuccessful, falls back to the previous one if one exists.
5469 pub fn htlc_satisfies_config(
5470 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
5471 ) -> Result<(), (&'static str, u16)> {
5472 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
5474 if let Some(prev_config) = self.context.prev_config() {
5475 self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
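// The commitment-number getters below return values in LDK's internal numbering, which counts
// down from INITIAL_COMMITMENT_NUMBER as new commitment transactions are exchanged; hence the
// small +1/+2 adjustments relative to the raw counters.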
5482 pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
5483 self.context.cur_holder_commitment_transaction_number + 1
5486 pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
5487 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 }
5490 pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
5491 self.context.cur_counterparty_commitment_transaction_number + 2
5495 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
5496 &self.context.holder_signer
5500 pub fn get_value_stat(&self) -> ChannelValueStat {
5502 value_to_self_msat: self.context.value_to_self_msat,
5503 channel_value_msat: self.context.channel_value_satoshis * 1000,
5504 channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
5505 pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
5506 pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
5507 holding_cell_outbound_amount_msat: {
5509 for h in self.context.holding_cell_htlc_updates.iter() {
5511 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
5519 counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
5520 counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
5524 /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
5525 /// Allowed in any state (including after shutdown)
5526 pub fn is_awaiting_monitor_update(&self) -> bool {
5527 self.context.channel_state.is_monitor_update_in_progress()
5530 /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
5531 pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
5532 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
5533 self.context.blocked_monitor_updates[0].update.update_id - 1
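// Monitor update IDs are assigned sequentially, so the ID just below the first still-blocked
// update is the most recent update that has actually been released.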
5536 /// Returns the next blocked monitor update, if one exists, and a bool which indicates whether a
5537 /// further blocked monitor update exists after the next.
5538 pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
5539 if self.context.blocked_monitor_updates.is_empty() { return None; }
5540 Some((self.context.blocked_monitor_updates.remove(0).update,
5541 !self.context.blocked_monitor_updates.is_empty()))
5544 /// Pushes a new monitor update into our monitor update queue, returning it if it should be
5545 /// immediately given to the user for persisting or `None` if it should be held as blocked.
5546 fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
5547 -> Option<ChannelMonitorUpdate> {
5548 let release_monitor = self.context.blocked_monitor_updates.is_empty();
5549 if !release_monitor {
5550 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
5559 pub fn blocked_monitor_updates_pending(&self) -> usize {
5560 self.context.blocked_monitor_updates.len()
5563 /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
5564 /// If the channel is outbound, this implies we have not yet broadcasted the funding
5565 /// transaction. If the channel is inbound, this implies simply that the channel has not advanced state.
5567 pub fn is_awaiting_initial_mon_persist(&self) -> bool {
5568 if !self.is_awaiting_monitor_update() { return false; }
5570 self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
5571 if flags.clone().clear(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty()
5573 // If we're not a 0conf channel, we'll be waiting on a monitor update with only
5574 // AwaitingChannelReady set, though our peer could have sent their channel_ready.
5575 debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
5578 if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
5579 self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
5580 // If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
5581 // waiting for the initial monitor persistence. Thus, we check if our commitment
5582 // transaction numbers have both been iterated only exactly once (for the
5583 // funding_signed), and we're awaiting monitor update.
5585 // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
5586 // only way to get an awaiting-monitor-update state during initial funding is if the
5587 // initial monitor persistence is still pending).
5589 // Because deciding we're awaiting initial broadcast spuriously could result in
5590 // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
5591 // we hard-assert here, even in production builds.
5592 if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
5593 assert!(self.context.monitor_pending_channel_ready);
5594 assert_eq!(self.context.latest_monitor_update_id, 0);
5600 /// Returns true if our channel_ready has been sent
5601 pub fn is_our_channel_ready(&self) -> bool {
5602 matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) ||
5603 matches!(self.context.channel_state, ChannelState::ChannelReady(_))
5606 /// Returns true if our peer has either initiated or agreed to shut down the channel.
5607 pub fn received_shutdown(&self) -> bool {
5608 self.context.channel_state.is_remote_shutdown_sent()
5611 /// Returns true if we either initiated or agreed to shut down the channel.
5612 pub fn sent_shutdown(&self) -> bool {
5613 self.context.channel_state.is_local_shutdown_sent()
5616 /// Returns true if we initiated shutting down the channel.
5617 pub fn initiated_shutdown(&self) -> bool {
5618 self.context.local_initiated_shutdown.is_some()
5621 /// Returns true if this channel is fully shut down. True here implies that no further actions
5622 /// may/will be taken on this channel, and thus this object should be freed. Any future changes
5623 /// will be handled appropriately by the chain monitor.
5624 pub fn is_shutdown(&self) -> bool {
5625 matches!(self.context.channel_state, ChannelState::ShutdownComplete)
5628 pub fn channel_update_status(&self) -> ChannelUpdateStatus {
5629 self.context.channel_update_status
5632 pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
5633 self.context.update_time_counter += 1;
5634 self.context.channel_update_status = status;
5637 fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
5638 // Called:
5639 // * always when a new block/transactions are confirmed with the new height
5640 // * when funding is signed with a height of 0
5641 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
5645 let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
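// A transaction confirmed at the current height has exactly one confirmation, hence the +1; a
// non-positive value means a reorg has unconfirmed the funding transaction.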
5646 if funding_tx_confirmations <= 0 {
5647 self.context.funding_tx_confirmation_height = 0;
5650 if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
5654 // If we're still pending the signature on a funding transaction, then we're not ready to send a
5655 // channel_ready yet.
5656 if self.context.signer_pending_funding {
5660 // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
5661 // channel_ready until the entire batch is ready.
5662 let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()).is_empty()) {
5663 self.context.channel_state.set_our_channel_ready();
5665 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
5666 self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
5667 self.context.update_time_counter += 1;
5669 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
5670 // We got a reorg but not enough to trigger a force close, just ignore.
5673 if self.context.funding_tx_confirmation_height != 0 &&
5674 self.context.channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new())
5676 // We should never see a funding transaction on-chain until we've received
5677 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
5678 // an inbound channel - before that we have no known funding TXID). The fuzzer,
5679 // however, may do this and we shouldn't treat it as a bug.
5680 #[cfg(not(fuzzing))]
5681 panic!("Started confirming a channel in a state pre-AwaitingChannelReady: {}.\n\
5682 Do NOT broadcast a funding transaction manually - let LDK do it for you!",
5683 self.context.channel_state.to_u32());
5685 // We got a reorg but not enough to trigger a force close, just ignore.
5689 if need_commitment_update {
5690 if !self.context.channel_state.is_monitor_update_in_progress() {
5691 if !self.context.channel_state.is_peer_disconnected() {
5692 let next_per_commitment_point =
5693 self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
5694 return Some(msgs::ChannelReady {
5695 channel_id: self.context.channel_id,
5696 next_per_commitment_point,
5697 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5701 self.context.monitor_pending_channel_ready = true;
5707 /// When a transaction is confirmed, we check whether it is or spends the funding transaction.
5708 /// In the first case, we store the confirmation height and calculate the short channel id.
5709 /// In the second, we simply return an Err indicating we need to be force-closed now.
5710 pub fn transactions_confirmed<NS: Deref, L: Deref>(
5711 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
5712 chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
5713 ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5715 NS::Target: NodeSigner,
5718 let mut msgs = (None, None);
5719 if let Some(funding_txo) = self.context.get_funding_txo() {
5720 for &(index_in_block, tx) in txdata.iter() {
5721 // Check if the transaction is the expected funding transaction, and if it is,
5722 // check that it pays the right amount to the right script.
5723 if self.context.funding_tx_confirmation_height == 0 {
5724 if tx.txid() == funding_txo.txid {
5725 let txo_idx = funding_txo.index as usize;
5726 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
5727 tx.output[txo_idx].value != self.context.channel_value_satoshis {
5728 if self.context.is_outbound() {
5729 // If we generated the funding transaction and it doesn't match what it
5730 // should, the client is really broken and we should just panic and
5731 // tell them off. That said, because hash collisions happen with high
5732 // probability in fuzzing mode, if we're fuzzing we just close the
5733 // channel and move on.
5734 #[cfg(not(fuzzing))]
5735 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5737 self.context.update_time_counter += 1;
5738 let err_reason = "funding tx had wrong script/value or output index";
5739 return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
5741 if self.context.is_outbound() {
5742 if !tx.is_coin_base() {
5743 for input in tx.input.iter() {
5744 if input.witness.is_empty() {
5745 // We generated a malleable funding transaction, implying we've
5746 // just exposed ourselves to funds loss to our counterparty.
5747 #[cfg(not(fuzzing))]
5748 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5753 self.context.funding_tx_confirmation_height = height;
5754 self.context.funding_tx_confirmed_in = Some(*block_hash);
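// Per BOLT 7, the short channel id packs the confirmation height, the transaction's index
// within its block, and the funding output index into a single u64.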
5755 self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
5756 Ok(scid) => Some(scid),
5757 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
5760 // If this is a coinbase transaction and not a 0-conf channel
5761 // we should update our min_depth to 100 to handle coinbase maturity
5762 if tx.is_coin_base() &&
5763 self.context.minimum_depth.unwrap_or(0) > 0 &&
5764 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
5765 self.context.minimum_depth = Some(COINBASE_MATURITY);
5768 // If we allow 1-conf funding, we may need to check for channel_ready here and
5769 // send it immediately instead of waiting for a best_block_updated call (which
5770 // may have already happened for this block).
5771 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5772 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5773 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
5774 msgs = (Some(channel_ready), announcement_sigs);
5777 for inp in tx.input.iter() {
5778 if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
5779 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
5780 return Err(ClosureReason::CommitmentTxConfirmed);
5788 /// When a new block is connected, we check the height of the block against outbound holding
5789 /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
5790 /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
5791 /// handled by the ChannelMonitor.
5793 /// If we return Err, the channel may have been closed, at which point the standard
5794 /// requirements apply - no calls may be made except those explicitly stated to be allowed post-shutdown.
5797 /// May return some HTLCs (and their payment_hash) which have timed out and should be failed back.
5799 pub fn best_block_updated<NS: Deref, L: Deref>(
5800 &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
5801 node_signer: &NS, user_config: &UserConfig, logger: &L
5802 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5804 NS::Target: NodeSigner,
5807 self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
5810 fn do_best_block_updated<NS: Deref, L: Deref>(
5811 &mut self, height: u32, highest_header_time: u32,
5812 chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
5813 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5815 NS::Target: NodeSigner,
5818 let mut timed_out_htlcs = Vec::new();
5819 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
5820 // forward an HTLC when our counterparty should almost certainly just fail it for expiring
5822 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
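// Any holding-cell HTLC whose cltv_expiry is at or below this limit is too close to expiry to
// be safely forwarded, so we time it out and fail it back instead.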
5823 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5825 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
5826 if *cltv_expiry <= unforwarded_htlc_cltv_limit {
5827 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
5835 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
5837 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5838 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5839 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5841 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5842 return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
5845 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
5846 self.context.channel_state.is_our_channel_ready() {
5847 let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5848 if self.context.funding_tx_confirmation_height == 0 {
5849 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
5850 // zero if it has been reorged out, however in either case, our state flags
5851 // indicate we've already sent a channel_ready
5852 funding_tx_confirmations = 0;
5855 // If we've sent channel_ready (or have both sent and received channel_ready), and
5856 // the funding transaction has become unconfirmed,
5857 // close the channel and hope we can get the latest state on chain (because presumably
5858 // the funding transaction is at least still in the mempool of most nodes).
5860 // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
5861 // 0-conf channel, but not doing so may lead to the
5862 // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have to.
5864 if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
5865 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
5866 self.context.minimum_depth.unwrap(), funding_tx_confirmations);
5867 return Err(ClosureReason::ProcessingError { err: err_reason });
5869 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
5870 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
5871 log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
5872 // If funding_tx_confirmed_in is unset, the channel must not be active
5873 assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new()));
5874 assert!(!self.context.channel_state.is_our_channel_ready());
5875 return Err(ClosureReason::FundingTimedOut);
5878 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5879 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5881 Ok((None, timed_out_htlcs, announcement_sigs))
5884 /// Indicates the funding transaction is no longer confirmed in the main chain. This may
5885 /// force-close the channel, but may also indicate a harmless reorganization of a block or two
5886 /// before the channel has reached channel_ready and we can just wait for more blocks.
5887 pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
5888 if self.context.funding_tx_confirmation_height != 0 {
5889 // We handle the funding disconnection by calling best_block_updated with a height one
5890 // below where our funding was connected, implying a reorg back to conf_height - 1.
5891 let reorg_height = self.context.funding_tx_confirmation_height - 1;
5892 // We use the time field to bump the current time we set on channel updates if it's
5893 // larger. If we don't know that time has moved forward, we can just set it to the last
5894 // time we saw and it will be ignored.
5895 let best_time = self.context.update_time_counter;
5896 match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) {
5897 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
5898 assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
5899 assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
5900 assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
5906 // We never learned about the funding confirmation anyway, just ignore
5911 // Methods to get unprompted messages to send to the remote end (or where we already returned
5912 // something in the handler for the message that prompted this message):
5914 /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
5915 /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
5916 /// directions). Should be used for both broadcasted announcements and in response to an
5917 /// AnnouncementSignatures message from the remote peer.
5919 /// Will only fail if we're not in a state where channel_announcement may be sent (including closing).
5922 /// This will only return ChannelError::Ignore upon failure.
5924 /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
5925 fn get_channel_announcement<NS: Deref>(
5926 &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5927 ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5928 if !self.context.config.announced_channel {
5929 return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
5931 if !self.context.is_usable() {
5932 return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
5935 let short_channel_id = self.context.get_short_channel_id()
5936 .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
5937 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5938 .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
5939 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
5940 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
5942 let msg = msgs::UnsignedChannelAnnouncement {
5943 features: channelmanager::provided_channel_features(&user_config),
5946 node_id_1: if were_node_one { node_id } else { counterparty_node_id },
5947 node_id_2: if were_node_one { counterparty_node_id } else { node_id },
5948 bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
5949 bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
5950 excess_data: Vec::new(),
5956 fn get_announcement_sigs<NS: Deref, L: Deref>(
5957 &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5958 best_block_height: u32, logger: &L
5959 ) -> Option<msgs::AnnouncementSignatures>
5961 NS::Target: NodeSigner,
5964 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
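// Fewer than six confirmations (BOLT 7 requires six before a channel may be announced), or not
// confirmed at all; don't create announcement_signatures yet.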
5968 if !self.context.is_usable() {
5972 if self.context.channel_state.is_peer_disconnected() {
5973 log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
5977 if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
5981 log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
5982 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5985 log_trace!(logger, "{:?}", e);
5989 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
5991 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
5996 match &self.context.holder_signer {
5997 ChannelSignerType::Ecdsa(ecdsa) => {
5998 let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
6000 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
6005 let short_channel_id = match self.context.get_short_channel_id() {
6007 None => return None,
6010 self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
6012 Some(msgs::AnnouncementSignatures {
6013 channel_id: self.context.channel_id(),
6015 node_signature: our_node_sig,
6016 bitcoin_signature: our_bitcoin_sig,
6019 // TODO (taproot|arik)
6025 /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are available.
6027 fn sign_channel_announcement<NS: Deref>(
6028 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
6029 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
6030 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
6031 let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
6032 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
6033 let were_node_one = announcement.node_id_1 == our_node_key;
6035 let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
6036 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
6037 match &self.context.holder_signer {
6038 ChannelSignerType::Ecdsa(ecdsa) => {
6039 let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
6040 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
6041 Ok(msgs::ChannelAnnouncement {
6042 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
6043 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
6044 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
6045 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
6046 contents: announcement,
6049 // TODO (taproot|arik)
6054 Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
6058 /// Processes an incoming announcement_signatures message, providing a fully-signed
6059 /// channel_announcement message which we can broadcast and storing our counterparty's
6060 /// signatures for later reconstruction/rebroadcast of the channel_announcement.
6061 pub fn announcement_signatures<NS: Deref>(
6062 &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
6063 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
6064 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
6065 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
6067 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
6069 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
6070 return Err(ChannelError::Close(format!(
6071 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
6072 &announcement, self.context.get_counterparty_node_id())));
6074 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
6075 return Err(ChannelError::Close(format!(
6076 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
6077 &announcement, self.context.counterparty_funding_pubkey())));
6080 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
6081 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
6082 return Err(ChannelError::Ignore(
6083 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
6086 self.sign_channel_announcement(node_signer, announcement)
6089 /// Gets a signed channel_announcement for this channel, if we previously received an
6090 /// announcement_signatures from our counterparty.
6091 pub fn get_signed_channel_announcement<NS: Deref>(
6092 &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
6093 ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
6094 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
6097 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
6099 Err(_) => return None,
6101 match self.sign_channel_announcement(node_signer, announcement) {
6102 Ok(res) => Some(res),
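// For illustration only: a hedged sketch of how a caller might drive the announcement
// methods above (names like `channel`, `node_signer`, `config`, `height` and `logger` are
// hypothetical; this is not ChannelManager's exact code path):
//
//     // Once the funding tx has the required six confirmations and the peer is connected:
//     if let Some(sigs) = channel.get_announcement_sigs(&node_signer, chain_hash, &config, height, &logger) {
//         // queue `sigs` for delivery to the peer
//     }
//     // When the peer's announcement_signatures arrive, build the fully-signed announcement:
//     // let ann = channel.announcement_signatures(&node_signer, chain_hash, height, &msg, &config)?;
//     // broadcast `ann`; `get_signed_channel_announcement` can rebuild it later from the stored sigs.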
6107 /// May panic if called on a channel that wasn't immediately-previously
6108 /// self.remove_uncommitted_htlcs_and_mark_paused()'d
6109 pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
6110 assert!(self.context.channel_state.is_peer_disconnected());
6111 assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
6112 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
6113 // current to_remote balances. However, it no longer has any use, and thus is now simply
6114 // set to a dummy (but valid, as required by the spec) public key.
6115 // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
6116 // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
6117 // valid, and valid in fuzzing mode's arbitrary validity criteria:
6118 let mut pk = [2; 33]; pk[1] = 0xff;
6119 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
6120 let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
6121 let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
6122 log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
6125 log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
6128 self.mark_awaiting_response();
6129 msgs::ChannelReestablish {
6130 channel_id: self.context.channel_id(),
6131 // The protocol has two different commitment number concepts - the "commitment
6132 // transaction number", which starts from 0 and counts up, and the "revocation key
6133 // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
6134 // commitment transaction numbers by the index which will be used to reveal the
6135 // revocation key for that commitment transaction, which means we have to convert them
6136 // to protocol-level commitment numbers here...
6138 // next_local_commitment_number is the next commitment_signed number we expect to
6139 // receive (indicating if they need to resend one that we missed).
6140 next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
6141 // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
6142 // receive, however we track it by the next commitment number for a remote transaction
6143 // (which is one further, as they always revoke previous commitment transaction, not
6144 // the one we send) so we have to decrement by 1. Note that if
6145 // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
6146 // dropped this channel on disconnect as it hasn't yet reached AwaitingChannelReady so we can't
6148 next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
6149 your_last_per_commitment_secret: remote_last_secret,
6150 my_current_per_commitment_point: dummy_pubkey,
6151 // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
6152 // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
6153 // txid of that interactive transaction, else we MUST NOT set it.
6154 next_funding_txid: None,
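// Worked example of the conversion above (illustrative only): with
// INITIAL_COMMITMENT_NUMBER = (1 << 48) - 1, a freshly-funded channel holds both
// commitment transaction numbers at INITIAL_COMMITMENT_NUMBER - 1, so
// next_local_commitment_number = INITIAL_COMMITMENT_NUMBER - (INITIAL_COMMITMENT_NUMBER - 1) = 1
// and next_remote_commitment_number = INITIAL_COMMITMENT_NUMBER - (INITIAL_COMMITMENT_NUMBER - 1) - 1 = 0,
// which matches the BOLT 2 expectations for a channel that has not yet exchanged any
// commitment updates beyond the initial ones.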
6159 // Send stuff to our remote peers:
6161 /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
6162 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
6163 /// commitment update.
6165 /// `Err`s will only be [`ChannelError::Ignore`].
6166 pub fn queue_add_htlc<F: Deref, L: Deref>(
6167 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
6168 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
6169 blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
6170 ) -> Result<(), ChannelError>
6171 where F::Target: FeeEstimator, L::Target: Logger
6174 .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
6175 skimmed_fee_msat, blinding_point, fee_estimator, logger)
6176 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
6178 if let ChannelError::Ignore(_) = err { /* fine */ }
6179 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
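// A hedged sketch of the two-step usage described below (caller-side names such as
// `channel`, `fee_estimator` and `logger` are hypothetical):
//
//     channel.queue_add_htlc(amt_msat, payment_hash, cltv_expiry, source, onion_packet,
//         None, None, &fee_estimator, &logger)?;
//     // Later, once a new commitment can be generated, the holding cell is drained via
//     // `maybe_free_holding_cell_htlcs`, producing the update_add_htlc + commitment_signed.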
6184 /// Adds a pending outbound HTLC to this channel. Note that you probably want
6185 /// [`Self::send_htlc_and_commit`] instead, as you'll generally want both messages at once.
6187 /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
6189 /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
6190 /// wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
6192 /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
6193 /// we may not yet have sent the previous commitment update messages and will need to
6194 /// regenerate them.
6196 /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
6197 /// on this [`Channel`] if `force_holding_cell` is false.
6199 /// `Err`s will only be [`ChannelError::Ignore`].
6200 fn send_htlc<F: Deref, L: Deref>(
6201 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
6202 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
6203 skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
6204 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
6205 ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
6206 where F::Target: FeeEstimator, L::Target: Logger
6208 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
6209 self.context.channel_state.is_local_shutdown_sent() ||
6210 self.context.channel_state.is_remote_shutdown_sent()
6212 return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
6214 let channel_total_msat = self.context.channel_value_satoshis * 1000;
6215 if amount_msat > channel_total_msat {
6216 return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
6219 if amount_msat == 0 {
6220 return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
6223 let available_balances = self.context.get_available_balances(fee_estimator);
6224 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
6225 return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
6226 available_balances.next_outbound_htlc_minimum_msat)));
6229 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
6230 return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
6231 available_balances.next_outbound_htlc_limit_msat)));
6234 if self.context.channel_state.is_peer_disconnected() {
6235 // Note that this should never really happen: if we're !is_live(), receipt of an
6236 // incoming HTLC for relay will result in us rejecting the HTLC, and we won't allow
6237 // the user to send directly into a !is_live() channel. However, if we
6238 // disconnected during the time the previous hop was doing the commitment dance we may
6239 // end up getting here after the forwarding delay. In any case, returning an
6240 // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
6241 return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
6244 let need_holding_cell = !self.context.channel_state.can_generate_new_commitment();
6245 log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
6246 payment_hash, amount_msat,
6247 if force_holding_cell { "into holding cell" }
6248 else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
6249 else { "to peer" });
6251 if need_holding_cell {
6252 force_holding_cell = true;
6255 // Now update local state:
6256 if force_holding_cell {
6257 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
6262 onion_routing_packet,
6269 self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
6270 htlc_id: self.context.next_holder_htlc_id,
6272 payment_hash: payment_hash.clone(),
6274 state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
6280 let res = msgs::UpdateAddHTLC {
6281 channel_id: self.context.channel_id,
6282 htlc_id: self.context.next_holder_htlc_id,
6286 onion_routing_packet,
6290 self.context.next_holder_htlc_id += 1;
6295 fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
6296 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
6297 // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
6298 // fail to generate this, we still are at least at a position where upgrading their status
6300 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
6301 let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
6302 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
6304 if let Some(state) = new_state {
6305 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
6309 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
6310 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
6311 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
6312 // Grab the preimage, if it exists, instead of cloning
6313 let mut reason = OutboundHTLCOutcome::Success(None);
6314 mem::swap(outcome, &mut reason);
6315 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
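// The same take-without-cloning idiom in isolation (purely illustrative, unrelated types):
//
//     let mut slot = Some(String::from("preimage"));
//     let mut taken = None;
//     std::mem::swap(&mut slot, &mut taken);
//     // `taken` now owns the original value; `slot` holds the cheap placeholder.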
6318 if let Some((feerate, update_state)) = self.context.pending_update_fee {
6319 if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
6320 debug_assert!(!self.context.is_outbound());
6321 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
6322 self.context.feerate_per_kw = feerate;
6323 self.context.pending_update_fee = None;
6326 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
6328 let (mut htlcs_ref, counterparty_commitment_tx) =
6329 self.build_commitment_no_state_update(logger);
6330 let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
6331 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
6332 htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
6334 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
6335 self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
6338 self.context.latest_monitor_update_id += 1;
6339 let monitor_update = ChannelMonitorUpdate {
6340 update_id: self.context.latest_monitor_update_id,
6341 counterparty_node_id: Some(self.context.counterparty_node_id),
6342 updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
6343 commitment_txid: counterparty_commitment_txid,
6344 htlc_outputs: htlcs.clone(),
6345 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
6346 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
6347 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
6348 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
6349 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
6351 channel_id: Some(self.context.channel_id()),
6353 self.context.channel_state.set_awaiting_remote_revoke();
6357 fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
6358 -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
6359 where L::Target: Logger
6361 let counterparty_keys = self.context.build_remote_transaction_keys();
6362 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
6363 let counterparty_commitment_tx = commitment_stats.tx;
6365 #[cfg(any(test, fuzzing))]
6367 if !self.context.is_outbound() {
6368 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
6369 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
6370 if let Some(info) = projected_commit_tx_info {
6371 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
6372 if info.total_pending_htlcs == total_pending_htlcs
6373 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
6374 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
6375 && info.feerate == self.context.feerate_per_kw {
6376 let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
6377 assert_eq!(actual_fee, info.fee);
6383 (commitment_stats.htlcs_included, counterparty_commitment_tx)
6386 /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
6387 /// generation when we shouldn't change HTLC/channel state.
6388 fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
6389 // Get the fee tests from `build_commitment_no_state_update`
6390 #[cfg(any(test, fuzzing))]
6391 self.build_commitment_no_state_update(logger);
6393 let counterparty_keys = self.context.build_remote_transaction_keys();
6394 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
6395 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
6397 match &self.context.holder_signer {
6398 ChannelSignerType::Ecdsa(ecdsa) => {
6399 let (signature, htlc_signatures);
6402 let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
6403 for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
6407 let res = ecdsa.sign_counterparty_commitment(
6408 &commitment_stats.tx,
6409 commitment_stats.inbound_htlc_preimages,
6410 commitment_stats.outbound_htlc_preimages,
6411 &self.context.secp_ctx,
6412 ).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
6414 htlc_signatures = res.1;
6416 log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
6417 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
6418 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
6419 log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
6421 for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
6422 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
6423 encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
6424 encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
6425 log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
6426 log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
6430 Ok((msgs::CommitmentSigned {
6431 channel_id: self.context.channel_id,
6435 partial_signature_with_nonce: None,
6436 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
6438 // TODO (taproot|arik)
6444 /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
6445 /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
6447 /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
6448 /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
6449 pub fn send_htlc_and_commit<F: Deref, L: Deref>(
6450 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
6451 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
6452 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
6453 ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
6454 where F::Target: FeeEstimator, L::Target: Logger
6456 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
6457 onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
6458 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
6461 let monitor_update = self.build_commitment_no_status_check(logger);
6462 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
6463 Ok(self.push_ret_blockable_mon_update(monitor_update))
6469 /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
6471 pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
6472 let new_forwarding_info = Some(CounterpartyForwardingInfo {
6473 fee_base_msat: msg.contents.fee_base_msat,
6474 fee_proportional_millionths: msg.contents.fee_proportional_millionths,
6475 cltv_expiry_delta: msg.contents.cltv_expiry_delta
6477 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
6479 self.context.counterparty_forwarding_info = new_forwarding_info;
6485 /// Begins the shutdown process, getting a message for the remote peer and returning all
6486 /// holding cell HTLCs for payment failure.
6487 pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
6488 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
6489 -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
6491 for htlc in self.context.pending_outbound_htlcs.iter() {
6492 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
6493 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
6496 if self.context.channel_state.is_local_shutdown_sent() {
6497 return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
6499 else if self.context.channel_state.is_remote_shutdown_sent() {
6500 return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
6502 if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
6503 return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
6505 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
6506 if self.context.channel_state.is_peer_disconnected() || self.context.channel_state.is_monitor_update_in_progress() {
6507 return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
6510 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
6513 // use override shutdown script if provided
6514 let shutdown_scriptpubkey = match override_shutdown_script {
6515 Some(script) => script,
6517 // otherwise, use the shutdown scriptpubkey provided by the signer
6518 match signer_provider.get_shutdown_scriptpubkey() {
6519 Ok(scriptpubkey) => scriptpubkey,
6520 Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
6524 if !shutdown_scriptpubkey.is_compatible(their_features) {
6525 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6527 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
6532 // From here on out, we may not fail!
6533 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
6534 self.context.channel_state.set_local_shutdown_sent();
6535 self.context.local_initiated_shutdown = Some(());
6536 self.context.update_time_counter += 1;
6538 let monitor_update = if update_shutdown_script {
6539 self.context.latest_monitor_update_id += 1;
6540 let monitor_update = ChannelMonitorUpdate {
6541 update_id: self.context.latest_monitor_update_id,
6542 counterparty_node_id: Some(self.context.counterparty_node_id),
6543 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
6544 scriptpubkey: self.get_closing_scriptpubkey(),
6546 channel_id: Some(self.context.channel_id()),
6548 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
6549 self.push_ret_blockable_mon_update(monitor_update)
6551 let shutdown = msgs::Shutdown {
6552 channel_id: self.context.channel_id,
6553 scriptpubkey: self.get_closing_scriptpubkey(),
6556 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
6557 // our shutdown until we've committed all of the pending changes.
6558 self.context.holding_cell_update_fee = None;
6559 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
6560 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
6562 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
6563 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
6570 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
6571 "we can't both complete shutdown and return a monitor update");
6573 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
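// A hedged caller-side sketch of consuming the return value above (names and error handling
// are placeholders, not this crate's exact API surface):
//
//     let (shutdown_msg, monitor_update_opt, dropped) =
//         channel.get_shutdown(&signer_provider, &their_features, None, None)?;
//     // Send `shutdown_msg` to the peer, apply `monitor_update_opt` if present, and fail
//     // each (HTLCSource, PaymentHash) in `dropped` back towards its origin.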
6576 pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
6577 self.context.holding_cell_htlc_updates.iter()
6578 .flat_map(|htlc_update| {
6580 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
6581 => Some((source, payment_hash)),
6585 .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
6589 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
6590 pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
6591 pub context: ChannelContext<SP>,
6592 pub unfunded_context: UnfundedChannelContext,
6595 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
6596 pub fn new<ES: Deref, F: Deref>(
6597 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
6598 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
6599 outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
6600 ) -> Result<OutboundV1Channel<SP>, APIError>
6601 where ES::Target: EntropySource,
6602 F::Target: FeeEstimator
6604 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
6605 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
6606 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
6607 let pubkeys = holder_signer.pubkeys().clone();
6609 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
6610 return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
6612 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6613 return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
6615 let channel_value_msat = channel_value_satoshis * 1000;
6616 if push_msat > channel_value_msat {
6617 return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
6619 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
6620 return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk", holder_selected_contest_delay)});
6622 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
6623 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6624 // Protocol level safety check in place, although it should never happen because
6625 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
6626 return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
6629 let channel_type = Self::get_initial_channel_type(&config, their_features);
6630 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
6632 let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6633 (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
6635 (ConfirmationTarget::NonAnchorChannelFee, 0)
6637 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
6639 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
6640 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
6641 if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
6642 return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay the fee for the initial commitment transaction ({}).", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
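// Worked example of the affordability check above (weights per BOLT 3; treat the numbers
// as illustrative rather than normative for this crate): a non-anchor commitment
// transaction weighs roughly 724 weight units plus 172 per HTLC, so at 2_500 sat/kW and
// assuming MIN_AFFORDABLE_HTLC_COUNT is 4, the fee is about
// (724 + 4 * 172) * 2_500 / 1_000 = 3_530 sat (3_530_000 msat), which the funder's balance
// (less any anchor output value) must be able to cover.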
6645 let mut secp_ctx = Secp256k1::new();
6646 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6648 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6649 match signer_provider.get_shutdown_scriptpubkey() {
6650 Ok(scriptpubkey) => Some(scriptpubkey),
6651 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
6655 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6656 if !shutdown_scriptpubkey.is_compatible(&their_features) {
6657 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6661 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
6662 Ok(script) => script,
6663 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
6666 let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
6669 context: ChannelContext {
6672 config: LegacyChannelConfig {
6673 options: config.channel_config.clone(),
6674 announced_channel: config.channel_handshake_config.announced_channel,
6675 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6680 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
6682 channel_id: temporary_channel_id,
6683 temporary_channel_id: Some(temporary_channel_id),
6684 channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
6685 announcement_sigs_state: AnnouncementSigsState::NotSent,
6687 channel_value_satoshis,
6689 latest_monitor_update_id: 0,
6691 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6692 shutdown_scriptpubkey,
6695 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6696 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6699 pending_inbound_htlcs: Vec::new(),
6700 pending_outbound_htlcs: Vec::new(),
6701 holding_cell_htlc_updates: Vec::new(),
6702 pending_update_fee: None,
6703 holding_cell_update_fee: None,
6704 next_holder_htlc_id: 0,
6705 next_counterparty_htlc_id: 0,
6706 update_time_counter: 1,
6708 resend_order: RAACommitmentOrder::CommitmentFirst,
6710 monitor_pending_channel_ready: false,
6711 monitor_pending_revoke_and_ack: false,
6712 monitor_pending_commitment_signed: false,
6713 monitor_pending_forwards: Vec::new(),
6714 monitor_pending_failures: Vec::new(),
6715 monitor_pending_finalized_fulfills: Vec::new(),
6717 signer_pending_commitment_update: false,
6718 signer_pending_funding: false,
6720 #[cfg(debug_assertions)]
6721 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6722 #[cfg(debug_assertions)]
6723 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6725 last_sent_closing_fee: None,
6726 pending_counterparty_closing_signed: None,
6727 expecting_peer_commitment_signed: false,
6728 closing_fee_limits: None,
6729 target_closing_feerate_sats_per_kw: None,
6731 funding_tx_confirmed_in: None,
6732 funding_tx_confirmation_height: 0,
6733 short_channel_id: None,
6734 channel_creation_height: current_chain_height,
6736 feerate_per_kw: commitment_feerate,
6737 counterparty_dust_limit_satoshis: 0,
6738 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6739 counterparty_max_htlc_value_in_flight_msat: 0,
6740 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
6741 counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
6742 holder_selected_channel_reserve_satoshis,
6743 counterparty_htlc_minimum_msat: 0,
6744 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6745 counterparty_max_accepted_htlcs: 0,
6746 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6747 minimum_depth: None, // Filled in in accept_channel
6749 counterparty_forwarding_info: None,
6751 channel_transaction_parameters: ChannelTransactionParameters {
6752 holder_pubkeys: pubkeys,
6753 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6754 is_outbound_from_holder: true,
6755 counterparty_parameters: None,
6756 funding_outpoint: None,
6757 channel_type_features: channel_type.clone()
6759 funding_transaction: None,
6760 is_batch_funding: None,
6762 counterparty_cur_commitment_point: None,
6763 counterparty_prev_commitment_point: None,
6764 counterparty_node_id,
6766 counterparty_shutdown_scriptpubkey: None,
6768 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6770 channel_update_status: ChannelUpdateStatus::Enabled,
6771 closing_signed_in_flight: false,
6773 announcement_sigs: None,
6775 #[cfg(any(test, fuzzing))]
6776 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6777 #[cfg(any(test, fuzzing))]
6778 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6780 workaround_lnd_bug_4006: None,
6781 sent_message_awaiting_response: None,
6783 latest_inbound_scid_alias: None,
6784 outbound_scid_alias,
6786 channel_pending_event_emitted: false,
6787 channel_ready_event_emitted: false,
6789 #[cfg(any(test, fuzzing))]
6790 historical_inbound_htlc_fulfills: new_hash_set(),
6795 blocked_monitor_updates: Vec::new(),
6796 local_initiated_shutdown: None,
6798 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
6802 /// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
6803 fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
6804 let counterparty_keys = self.context.build_remote_transaction_keys();
6805 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
6806 let signature = match &self.context.holder_signer {
6807 // TODO (taproot|arik): move match into calling method for Taproot
6808 ChannelSignerType::Ecdsa(ecdsa) => {
6809 ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
6810 .map(|(sig, _)| sig).ok()?
6812 // TODO (taproot|arik)
6817 if self.context.signer_pending_funding {
6818 log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
6819 self.context.signer_pending_funding = false;
6822 Some(msgs::FundingCreated {
6823 temporary_channel_id: self.context.temporary_channel_id.unwrap(),
6824 funding_txid: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
6825 funding_output_index: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
6828 partial_signature_with_nonce: None,
6830 next_local_nonce: None,
6834 /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
6835 /// a funding_created message for the remote peer.
6836 /// Panics if called at some time other than immediately after initial handshake, if called twice,
6837 /// or if called on an inbound channel.
6838 /// Note that channel_id changes during this call!
6839 /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
6840 /// If an Err is returned, it is a ChannelError::Close.
6841 pub fn get_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
6842 -> Result<Option<msgs::FundingCreated>, (Self, ChannelError)> where L::Target: Logger {
6843 if !self.context.is_outbound() {
6844 panic!("Tried to create outbound funding_created message on an inbound channel!");
6847 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
6848 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
6850 panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
6852 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6853 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6854 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6855 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6858 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6859 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
6861 // Now that we're past error-generating stuff, update our local state:
6863 self.context.channel_state = ChannelState::FundingNegotiated;
6864 self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
6866 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
6867 // We can skip this if it is a zero-conf channel.
6868 if funding_transaction.is_coin_base() &&
6869 self.context.minimum_depth.unwrap_or(0) > 0 &&
6870 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
6871 self.context.minimum_depth = Some(COINBASE_MATURITY);
6874 self.context.funding_transaction = Some(funding_transaction);
6875 self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
6877 let funding_created = self.get_funding_created_msg(logger);
6878 if funding_created.is_none() {
6879 #[cfg(not(async_signing))] {
6880 panic!("Failed to get signature for new funding creation");
6882 #[cfg(async_signing)] {
6883 if !self.context.signer_pending_funding {
6884 log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
6885 self.context.signer_pending_funding = true;
6893 fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
6894 // The default channel type (ie the first one we try) depends on whether the channel is
6895 // public - if it is, we just go with `only_static_remotekey` as it's the only option
6896 // available. If it's private, we first try `scid_privacy` as it provides better privacy
6897 // with no other changes, and fall back to `only_static_remotekey`.
6898 let mut ret = ChannelTypeFeatures::only_static_remote_key();
6899 if !config.channel_handshake_config.announced_channel &&
6900 config.channel_handshake_config.negotiate_scid_privacy &&
6901 their_features.supports_scid_privacy() {
6902 ret.set_scid_privacy_required();
6905 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
6906 // set it now. If they don't understand it, we'll fall back to our default of
6907 // `only_static_remotekey`.
6908 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
6909 their_features.supports_anchors_zero_fee_htlc_tx() {
6910 ret.set_anchors_zero_fee_htlc_tx_required();
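// A hedged standalone check of the feature combination built above (assuming only the
// setters/getters already used in this file):
//
//     let mut t = ChannelTypeFeatures::only_static_remote_key();
//     t.set_scid_privacy_required();
//     t.set_anchors_zero_fee_htlc_tx_required();
//     assert!(t.supports_scid_privacy() && t.supports_anchors_zero_fee_htlc_tx());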
6916 /// If we receive an error message, it may only be a rejection of the channel type we tried,
6917 /// not of our ability to open any channel at all. Thus, on error, we should first call this
6918 /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
6919 pub(crate) fn maybe_handle_error_without_close<F: Deref>(
6920 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
6921 ) -> Result<msgs::OpenChannel, ()>
6923 F::Target: FeeEstimator
6925 if !self.context.is_outbound() ||
6927 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
6928 if flags == NegotiatingFundingFlags::OUR_INIT_SENT
6933 if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
6934 // We've exhausted our options
6937 // We support opening a few different types of channels. Try removing our additional
6938 // features one by one until we've either arrived at our default or the counterparty has
6941 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
6942 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
6943 // checks whether the counterparty supports every feature, this would only happen if the
6944 // counterparty is advertising the feature, but rejecting channels proposing the feature for
6946 if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
6947 self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
6948 self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
6949 assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
6950 } else if self.context.channel_type.supports_scid_privacy() {
6951 self.context.channel_type.clear_scid_privacy();
6953 self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
6955 self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
6956 Ok(self.get_open_channel(chain_hash))
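// The downgrade order above, spelled out as the sequence of channel types a caller would
// see across successive calls (illustrative only, starting from the richest type):
//
//     anchors_zero_fee_htlc_tx | scid_privacy | static_remote_key
//       -> scid_privacy | static_remote_key
//       -> static_remote_key
//       -> Err(()) once no simpler type remains to try
//
// Each step re-sends `open_channel` via `get_open_channel` with the reduced type.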
6959 pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
6960 if !self.context.is_outbound() {
6961 panic!("Tried to open a channel for an inbound channel?");
6963 if self.context.have_received_message() {
6964 panic!("Cannot generate an open_channel after we've moved forward");
6967 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6968 panic!("Tried to send an open_channel for a channel that has already advanced");
6971 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6972 let keys = self.context.get_holder_pubkeys();
6975 common_fields: msgs::CommonOpenChannelFields {
6977 temporary_channel_id: self.context.channel_id,
6978 funding_satoshis: self.context.channel_value_satoshis,
6979 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6980 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6981 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6982 commitment_feerate_sat_per_1000_weight: self.context.feerate_per_kw as u32,
6983 to_self_delay: self.context.get_holder_selected_contest_delay(),
6984 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6985 funding_pubkey: keys.funding_pubkey,
6986 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
6987 payment_basepoint: keys.payment_point,
6988 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
6989 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
6990 first_per_commitment_point,
6991 channel_flags: if self.context.config.announced_channel {1} else {0},
6992 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6993 Some(script) => script.clone().into_inner(),
6994 None => Builder::new().into_script(),
6996 channel_type: Some(self.context.channel_type.clone()),
6998 push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
6999 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
7004 pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
7005 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
7007 // Check sanity of message fields:
7008 if !self.context.is_outbound() {
7009 return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
7011 if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
7012 return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
7014 if msg.common_fields.dust_limit_satoshis > 21000000 * 100000000 {
7015 return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.common_fields.dust_limit_satoshis)));
7017 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
7018 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
7020 if msg.common_fields.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
7021 return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.common_fields.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
7023 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
7024 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
7025 msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
7027 let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
7028 if msg.common_fields.htlc_minimum_msat >= full_channel_value_msat {
7029 return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.common_fields.htlc_minimum_msat, full_channel_value_msat)));
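// Worked example of the reserve/minimum checks in this section (illustrative numbers): for
// a 1_000_000 sat channel where we selected a 10_000 sat reserve, the peer's
// channel_reserve_satoshis must be <= 990_000; with, say, a 20_000 sat peer reserve,
// full_channel_value_msat = (1_000_000 - 20_000) * 1_000 = 980_000_000, so any
// htlc_minimum_msat at or above that would render the channel unusable and is rejected.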
7031 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
7032 if msg.common_fields.to_self_delay > max_delay_acceptable {
7033 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.common_fields.to_self_delay)));
7035 if msg.common_fields.max_accepted_htlcs < 1 {
7036 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
7038 if msg.common_fields.max_accepted_htlcs > MAX_HTLCS {
7039 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.common_fields.max_accepted_htlcs, MAX_HTLCS)));
7042 // Now check against optional parameters as set by config...
7043 if msg.common_fields.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
7044 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.common_fields.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
7046 if msg.common_fields.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
7047 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.common_fields.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
7049 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
7050 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
7052 if msg.common_fields.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
7053 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.common_fields.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
7055 if msg.common_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
7056 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
7058 if msg.common_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
7059 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
7061 if msg.common_fields.minimum_depth > peer_limits.max_minimum_depth {
7062 return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.common_fields.minimum_depth)));
7065 if let Some(ty) = &msg.common_fields.channel_type {
7066 if *ty != self.context.channel_type {
7067 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
7069 } else if their_features.supports_channel_type() {
7070 // Assume they've accepted the channel type as they said they understand it.
7072 let channel_type = ChannelTypeFeatures::from_init(&their_features);
7073 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
7074 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
7076 self.context.channel_type = channel_type.clone();
7077 self.context.channel_transaction_parameters.channel_type_features = channel_type;
7080 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
7081 match &msg.common_fields.shutdown_scriptpubkey {
7082 &Some(ref script) => {
7083 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
7084 if script.len() == 0 {
7087 if !script::is_bolt2_compliant(&script, their_features) {
7088 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
7090 Some(script.clone())
7093 // Peer is signaling upfront_shutdown but didn't opt out with the correct mechanism (i.e. a 0-length script). Peer looks buggy, so we fail the channel
7095 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we didn't receive a script. Use a 0-length script to opt out".to_owned()));
7100 self.context.counterparty_dust_limit_satoshis = msg.common_fields.dust_limit_satoshis;
7101 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.common_fields.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
7102 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
7103 self.context.counterparty_htlc_minimum_msat = msg.common_fields.htlc_minimum_msat;
7104 self.context.counterparty_max_accepted_htlcs = msg.common_fields.max_accepted_htlcs;
7106 if peer_limits.trust_own_funding_0conf {
7107 self.context.minimum_depth = Some(msg.common_fields.minimum_depth);
7109 self.context.minimum_depth = Some(cmp::max(1, msg.common_fields.minimum_depth));
7112 let counterparty_pubkeys = ChannelPublicKeys {
7113 funding_pubkey: msg.common_fields.funding_pubkey,
7114 revocation_basepoint: RevocationBasepoint::from(msg.common_fields.revocation_basepoint),
7115 payment_point: msg.common_fields.payment_basepoint,
7116 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.common_fields.delayed_payment_basepoint),
7117 htlc_basepoint: HtlcBasepoint::from(msg.common_fields.htlc_basepoint)
7120 self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
7121 selected_contest_delay: msg.common_fields.to_self_delay,
7122 pubkeys: counterparty_pubkeys,
7125 self.context.counterparty_cur_commitment_point = Some(msg.common_fields.first_per_commitment_point);
7126 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
7128 self.context.channel_state = ChannelState::NegotiatingFunding(
7129 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
7131 self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
7136 /// Handles a funding_signed message from the remote end.
7137 /// If this call is successful, broadcast the funding transaction (and not before!)
7138 pub fn funding_signed<L: Deref>(
7139 mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
7140 ) -> Result<(Channel<SP>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (OutboundV1Channel<SP>, ChannelError)>
7144 if !self.context.is_outbound() {
7145 return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
7147 if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
7148 return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
7150 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7151 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7152 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7153 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7156 let funding_script = self.context.get_funding_redeemscript();
7158 let counterparty_keys = self.context.build_remote_transaction_keys();
7159 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
7160 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
7161 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
7163 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
7164 &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
7166 let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
7167 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
7169 let trusted_tx = initial_commitment_tx.trust();
7170 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
7171 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
7172 // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
7173 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
7174 return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));
7178 let holder_commitment_tx = HolderCommitmentTransaction::new(
7179 initial_commitment_tx,
7182 &self.context.get_holder_pubkeys().funding_pubkey,
7183 self.context.counterparty_funding_pubkey()
7187 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
7188 if validated.is_err() {
7189 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
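// The commitment is valid; gather everything needed to construct the initial ChannelMonitor which will
// watch the chain for this channel's funding output.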
7192 let funding_redeemscript = self.context.get_funding_redeemscript();
7193 let funding_txo = self.context.get_funding_txo().unwrap();
7194 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
7195 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
7196 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
7197 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
7198 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
7199 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
7200 shutdown_script, self.context.get_holder_selected_contest_delay(),
7201 &self.context.destination_script, (funding_txo, funding_txo_script),
7202 &self.context.channel_transaction_parameters,
7203 funding_redeemscript.clone(), self.context.channel_value_satoshis,
7205 holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id());
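// Hand the monitor the initial counterparty commitment transaction data so it can recognize (and react to)
// a broadcast of that state by the counterparty.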
7206 channel_monitor.provide_initial_counterparty_commitment_tx(
7207 counterparty_initial_bitcoin_tx.txid, Vec::new(),
7208 self.context.cur_counterparty_commitment_transaction_number,
7209 self.context.counterparty_cur_commitment_point.unwrap(),
7210 counterparty_initial_commitment_tx.feerate_per_kw(),
7211 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7212 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
7214 assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have not had any monitors yet, so no update can be in progress!
7215 if self.context.is_batch_funding() {
7216 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
7218 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
7220 self.context.cur_holder_commitment_transaction_number -= 1;
7221 self.context.cur_counterparty_commitment_transaction_number -= 1;
7223 log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
7225 let mut channel = Channel { context: self.context };
7227 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
7228 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7229 Ok((channel, channel_monitor))
7232 /// Indicates that the signer may have some signatures for us, so we should retry if we're blocked.
7234 #[cfg(async_signing)]
7235 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
7236 if self.context.signer_pending_funding && self.context.is_outbound() {
7237 log_trace!(logger, "Signer unblocked a funding_created");
7238 self.get_funding_created_msg(logger)
7243 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
7244 pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
7245 pub context: ChannelContext<SP>,
7246 pub unfunded_context: UnfundedChannelContext,
7249 /// Fetches the [`ChannelTypeFeatures`] that will be used for a channel built from a given
7250 /// [`msgs::OpenChannel`].
7251 pub(super) fn channel_type_from_open_channel(
7252 msg: &msgs::OpenChannel, their_features: &InitFeatures,
7253 our_supported_features: &ChannelTypeFeatures
7254 ) -> Result<ChannelTypeFeatures, ChannelError> {
7255 if let Some(channel_type) = &msg.common_fields.channel_type {
7256 if channel_type.supports_any_optional_bits() {
7257 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
7260 // We only support the channel types defined by the `ChannelManager` in
7261 // `provided_channel_type_features`. The channel type must always support
7262 // `static_remote_key`.
7263 if !channel_type.requires_static_remote_key() {
7264 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
7266 // Make sure we support all of the features behind the channel type.
7267 if !channel_type.is_subset(our_supported_features) {
7268 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
7270 let announced_channel = (msg.common_fields.channel_flags & 1) == 1;
7271 if channel_type.requires_scid_privacy() && announced_channel {
7272 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
7274 Ok(channel_type.clone())
7276 let channel_type = ChannelTypeFeatures::from_init(&their_features);
7277 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
7278 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
7284 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
7285 /// Creates a new channel from a remote side's request for one.
7286 /// Assumes chain_hash has already been checked and corresponds with what we expect!
7287 pub fn new<ES: Deref, F: Deref, L: Deref>(
7288 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
7289 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
7290 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
7291 current_chain_height: u32, logger: &L, is_0conf: bool,
7292 ) -> Result<InboundV1Channel<SP>, ChannelError>
7293 where ES::Target: EntropySource,
7294 F::Target: FeeEstimator,
7297 let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.common_fields.temporary_channel_id));
7298 let announced_channel = (msg.common_fields.channel_flags & 1) == 1;
7300 // First check the channel type is known, failing before we do anything else if we don't
7301 // support this channel type.
7302 let channel_type = channel_type_from_open_channel(msg, their_features, our_supported_features)?;
7304 let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.common_fields.funding_satoshis, user_id);
7305 let holder_signer = signer_provider.derive_channel_signer(msg.common_fields.funding_satoshis, channel_keys_id);
7306 let pubkeys = holder_signer.pubkeys().clone();
7307 let counterparty_pubkeys = ChannelPublicKeys {
7308 funding_pubkey: msg.common_fields.funding_pubkey,
7309 revocation_basepoint: RevocationBasepoint::from(msg.common_fields.revocation_basepoint),
7310 payment_point: msg.common_fields.payment_basepoint,
7311 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.common_fields.delayed_payment_basepoint),
7312 htlc_basepoint: HtlcBasepoint::from(msg.common_fields.htlc_basepoint)
7315 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
7316 return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
7319 // Check sanity of message fields:
7320 if msg.common_fields.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
7321 return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.common_fields.funding_satoshis)));
7323 if msg.common_fields.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
7324 return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.common_fields.funding_satoshis)));
7326 if msg.channel_reserve_satoshis > msg.common_fields.funding_satoshis {
7327 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.common_fields.funding_satoshis)));
7329 let full_channel_value_msat = (msg.common_fields.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
7330 if msg.push_msat > full_channel_value_msat {
7331 return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
7333 if msg.common_fields.dust_limit_satoshis > msg.common_fields.funding_satoshis {
7334 return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.common_fields.dust_limit_satoshis, msg.common_fields.funding_satoshis)));
7336 if msg.common_fields.htlc_minimum_msat >= full_channel_value_msat {
7337 return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.common_fields.htlc_minimum_msat, full_channel_value_msat)));
7339 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.common_fields.commitment_feerate_sat_per_1000_weight, None, &&logger)?;
7341 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
7342 if msg.common_fields.to_self_delay > max_counterparty_selected_contest_delay {
7343 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.common_fields.to_self_delay)));
7345 if msg.common_fields.max_accepted_htlcs < 1 {
7346 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
7348 if msg.common_fields.max_accepted_htlcs > MAX_HTLCS {
7349 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.common_fields.max_accepted_htlcs, MAX_HTLCS)));
7352 // Now check against optional parameters as set by config...
7353 if msg.common_fields.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
7354 return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.common_fields.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
7356 if msg.common_fields.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
7357 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.common_fields.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
7359 if msg.common_fields.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
7360 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.common_fields.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
7362 if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
7363 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
7365 if msg.common_fields.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
7366 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.common_fields.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
7368 if msg.common_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
7369 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
7371 if msg.common_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
7372 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
7375 // Convert things into internal flags and prep our state:
7377 if config.channel_handshake_limits.force_announced_channel_preference {
7378 if config.channel_handshake_config.announced_channel != announced_channel {
7379 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
7383 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.common_fields.funding_satoshis, config);
7384 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
7385 // Protocol-level safety check; this should never trigger because
7386 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
7387 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
7389 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
7390 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
7392 if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
7393 log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
7394 msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
7396 if holder_selected_channel_reserve_satoshis < msg.common_fields.dust_limit_satoshis {
7397 return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.common_fields.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
7400 // check if the funder's amount for the initial commitment tx is sufficient
7401 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
7402 let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
7403 ANCHOR_OUTPUT_VALUE_SATOSHI * 2
7407 let funders_amount_msat = msg.common_fields.funding_satoshis * 1000 - msg.push_msat;
7408 let commitment_tx_fee = commit_tx_fee_msat(msg.common_fields.commitment_feerate_sat_per_1000_weight, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
7409 if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
7410 return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay the fee for the initial commitment transaction ({} sats).", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
7413 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
7414 // While it's reasonable for us to not meet the channel reserve initially (if they don't
7415 // want to push much to us), our counterparty should always have more than our reserve.
7416 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
7417 return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
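// Handle the counterparty's (optional) upfront shutdown script: a present, non-empty script must be
// BOLT 2 compliant, while a 0-length script opts out of the feature entirely.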
7420 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
7421 match &msg.common_fields.shutdown_scriptpubkey {
7422 &Some(ref script) => {
7423 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
7424 if script.len() == 0 {
7427 if !script::is_bolt2_compliant(&script, their_features) {
7428 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
7430 Some(script.clone())
7433 // Peer is signaling upfront_shutdown but didn't opt out with the correct mechanism (i.e. a 0-length script). Peer looks buggy, so we fail the channel
7435 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we didn't receive any script. Use a 0-length script to opt out".to_owned()));
7440 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
7441 match signer_provider.get_shutdown_scriptpubkey() {
7442 Ok(scriptpubkey) => Some(scriptpubkey),
7443 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
7447 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
7448 if !shutdown_scriptpubkey.is_compatible(&their_features) {
7449 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
7453 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
7454 Ok(script) => script,
7455 Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
7458 let mut secp_ctx = Secp256k1::new();
7459 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
7461 let minimum_depth = if is_0conf {
7464 Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
7468 context: ChannelContext {
7471 config: LegacyChannelConfig {
7472 options: config.channel_config.clone(),
7474 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
7479 inbound_handshake_limits_override: None,
7481 temporary_channel_id: Some(msg.common_fields.temporary_channel_id),
7482 channel_id: msg.common_fields.temporary_channel_id,
7483 channel_state: ChannelState::NegotiatingFunding(
7484 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
7486 announcement_sigs_state: AnnouncementSigsState::NotSent,
7489 latest_monitor_update_id: 0,
7491 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
7492 shutdown_scriptpubkey,
7495 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
7496 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
7497 value_to_self_msat: msg.push_msat,
7499 pending_inbound_htlcs: Vec::new(),
7500 pending_outbound_htlcs: Vec::new(),
7501 holding_cell_htlc_updates: Vec::new(),
7502 pending_update_fee: None,
7503 holding_cell_update_fee: None,
7504 next_holder_htlc_id: 0,
7505 next_counterparty_htlc_id: 0,
7506 update_time_counter: 1,
7508 resend_order: RAACommitmentOrder::CommitmentFirst,
7510 monitor_pending_channel_ready: false,
7511 monitor_pending_revoke_and_ack: false,
7512 monitor_pending_commitment_signed: false,
7513 monitor_pending_forwards: Vec::new(),
7514 monitor_pending_failures: Vec::new(),
7515 monitor_pending_finalized_fulfills: Vec::new(),
7517 signer_pending_commitment_update: false,
7518 signer_pending_funding: false,
7520 #[cfg(debug_assertions)]
7521 holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.common_fields.funding_satoshis * 1000 - msg.push_msat)),
7522 #[cfg(debug_assertions)]
7523 counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.common_fields.funding_satoshis * 1000 - msg.push_msat)),
7525 last_sent_closing_fee: None,
7526 pending_counterparty_closing_signed: None,
7527 expecting_peer_commitment_signed: false,
7528 closing_fee_limits: None,
7529 target_closing_feerate_sats_per_kw: None,
7531 funding_tx_confirmed_in: None,
7532 funding_tx_confirmation_height: 0,
7533 short_channel_id: None,
7534 channel_creation_height: current_chain_height,
7536 feerate_per_kw: msg.common_fields.commitment_feerate_sat_per_1000_weight,
7537 channel_value_satoshis: msg.common_fields.funding_satoshis,
7538 counterparty_dust_limit_satoshis: msg.common_fields.dust_limit_satoshis,
7539 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
7540 counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.common_fields.max_htlc_value_in_flight_msat, msg.common_fields.funding_satoshis * 1000),
7541 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.common_fields.funding_satoshis, &config.channel_handshake_config),
7542 counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
7543 holder_selected_channel_reserve_satoshis,
7544 counterparty_htlc_minimum_msat: msg.common_fields.htlc_minimum_msat,
7545 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
7546 counterparty_max_accepted_htlcs: msg.common_fields.max_accepted_htlcs,
7547 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
7550 counterparty_forwarding_info: None,
7552 channel_transaction_parameters: ChannelTransactionParameters {
7553 holder_pubkeys: pubkeys,
7554 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
7555 is_outbound_from_holder: false,
7556 counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
7557 selected_contest_delay: msg.common_fields.to_self_delay,
7558 pubkeys: counterparty_pubkeys,
7560 funding_outpoint: None,
7561 channel_type_features: channel_type.clone()
7563 funding_transaction: None,
7564 is_batch_funding: None,
7566 counterparty_cur_commitment_point: Some(msg.common_fields.first_per_commitment_point),
7567 counterparty_prev_commitment_point: None,
7568 counterparty_node_id,
7570 counterparty_shutdown_scriptpubkey,
7572 commitment_secrets: CounterpartyCommitmentSecrets::new(),
7574 channel_update_status: ChannelUpdateStatus::Enabled,
7575 closing_signed_in_flight: false,
7577 announcement_sigs: None,
7579 #[cfg(any(test, fuzzing))]
7580 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
7581 #[cfg(any(test, fuzzing))]
7582 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
7584 workaround_lnd_bug_4006: None,
7585 sent_message_awaiting_response: None,
7587 latest_inbound_scid_alias: None,
7588 outbound_scid_alias: 0,
7590 channel_pending_event_emitted: false,
7591 channel_ready_event_emitted: false,
7593 #[cfg(any(test, fuzzing))]
7594 historical_inbound_htlc_fulfills: new_hash_set(),
7599 local_initiated_shutdown: None,
7601 blocked_monitor_updates: Vec::new(),
7603 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
7609 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
7610 /// should be sent back to the counterparty node.
7612 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7613 pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
7614 if self.context.is_outbound() {
7615 panic!("Tried to send accept_channel for an outbound channel?");
7618 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7619 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7621 panic!("Tried to send accept_channel after channel had moved forward");
7623 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7624 panic!("Tried to send an accept_channel for a channel that has already advanced");
7627 self.generate_accept_channel_message()
7630 /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
7631 /// inbound channel. If the intention is to accept an inbound channel, use
7632 /// [`InboundV1Channel::accept_inbound_channel`] instead.
7634 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7635 fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
7636 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
7637 let keys = self.context.get_holder_pubkeys();
7639 msgs::AcceptChannel {
7640 common_fields: msgs::CommonAcceptChannelFields {
7641 temporary_channel_id: self.context.channel_id,
7642 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
7643 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
7644 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
7645 minimum_depth: self.context.minimum_depth.unwrap(),
7646 to_self_delay: self.context.get_holder_selected_contest_delay(),
7647 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
7648 funding_pubkey: keys.funding_pubkey,
7649 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
7650 payment_basepoint: keys.payment_point,
7651 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
7652 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
7653 first_per_commitment_point,
7654 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
7655 Some(script) => script.clone().into_inner(),
7656 None => Builder::new().into_script(),
7658 channel_type: Some(self.context.channel_type.clone()),
7660 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
7662 next_local_nonce: None,
7666 /// Allows tests to extract a [`msgs::AcceptChannel`] message for an inbound channel without accepting it.
7669 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7671 pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
7672 self.generate_accept_channel_message()
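/// Rebuilds our initial holder commitment transaction and verifies the counterparty's `funding_created`
/// signature over it.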
7675 fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
7676 let funding_script = self.context.get_funding_redeemscript();
7678 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
7679 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
7680 let trusted_tx = initial_commitment_tx.trust();
7681 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
7682 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
7683 // They sign the holder commitment transaction...
7684 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
7685 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
7686 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
7687 encode::serialize_hex(&funding_script), &self.context.channel_id());
7688 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
7690 Ok(initial_commitment_tx)
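/// Handles a funding_created message from the remote end. On success this builds the initial
/// `ChannelMonitor`, generates our `funding_signed` (if the signer is ready), and promotes this into a
/// funded [`Channel`].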
7693 pub fn funding_created<L: Deref>(
7694 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
7695 ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
7699 if self.context.is_outbound() {
7700 return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
7703 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7704 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7706 // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
7707 // remember the channel, so it's safe to just send an error_message here and drop the channel.
7709 return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
7711 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7712 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7713 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7714 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7717 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
7718 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
7719 // This is an externally observable change before we finish all our checks. In particular
7720 // check_funding_created_signature may fail.
7721 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
7723 let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
7725 Err(ChannelError::Close(e)) => {
7726 self.context.channel_transaction_parameters.funding_outpoint = None;
7727 return Err((self, ChannelError::Close(e)));
7730 // The only error we know how to handle is ChannelError::Close, so we fall over here
7731 // to make sure we don't continue with an inconsistent state.
7732 panic!("unexpected error type from check_funding_created_signature {:?}", e);
7736 let holder_commitment_tx = HolderCommitmentTransaction::new(
7737 initial_commitment_tx,
7740 &self.context.get_holder_pubkeys().funding_pubkey,
7741 self.context.counterparty_funding_pubkey()
7744 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
7745 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
7748 // Now that we're past error-generating stuff, update our local state:
7750 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
7751 self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
7752 self.context.cur_counterparty_commitment_transaction_number -= 1;
7753 self.context.cur_holder_commitment_transaction_number -= 1;
7755 let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
7757 let funding_redeemscript = self.context.get_funding_redeemscript();
7758 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
7759 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
7760 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
7761 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
7762 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
7763 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
7764 shutdown_script, self.context.get_holder_selected_contest_delay(),
7765 &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
7766 &self.context.channel_transaction_parameters,
7767 funding_redeemscript.clone(), self.context.channel_value_satoshis,
7769 holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id());
7770 channel_monitor.provide_initial_counterparty_commitment_tx(
7771 counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
7772 self.context.cur_counterparty_commitment_transaction_number + 1,
7773 self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
7774 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7775 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
7777 log_info!(logger, "{} funding_signed for peer for channel {}",
7778 if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
7780 // Promote the channel to a full-fledged one now that we have updated the state and have a
7781 // `ChannelMonitor`.
7782 let mut channel = Channel {
7783 context: self.context,
7785 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
7786 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7788 Ok((channel, funding_signed, channel_monitor))
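// Channel (de)serialization format version. Raising `MIN_SERIALIZATION_VERSION` prevents older LDK
// versions from reading channels written by this one.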
7792 const SERIALIZATION_VERSION: u8 = 3;
7793 const MIN_SERIALIZATION_VERSION: u8 = 3;
7795 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
7801 impl Writeable for ChannelUpdateStatus {
7802 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7803 // We only care about writing out the current state as it was announced, ie only either
7804 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
7805 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
7807 ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
7808 ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
7809 ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
7810 ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
7816 impl Readable for ChannelUpdateStatus {
7817 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7818 Ok(match <u8 as Readable>::read(reader)? {
7819 0 => ChannelUpdateStatus::Enabled,
7820 1 => ChannelUpdateStatus::Disabled,
7821 _ => return Err(DecodeError::InvalidValue),
7826 impl Writeable for AnnouncementSigsState {
7827 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7828 // We only care about writing out the current state as if we had just disconnected, at
7829 // which point we always set anything but AnnouncementSigsReceived to NotSent.
7831 AnnouncementSigsState::NotSent => 0u8.write(writer),
7832 AnnouncementSigsState::MessageSent => 0u8.write(writer),
7833 AnnouncementSigsState::Committed => 0u8.write(writer),
7834 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
7839 impl Readable for AnnouncementSigsState {
7840 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7841 Ok(match <u8 as Readable>::read(reader)? {
7842 0 => AnnouncementSigsState::NotSent,
7843 1 => AnnouncementSigsState::PeerReceived,
7844 _ => return Err(DecodeError::InvalidValue),
7849 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
7850 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7851 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been called.
7854 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
7856 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7857 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
7858 // the low bytes now and the optional high bytes later.
7859 let user_id_low = self.context.user_id as u64;
7860 user_id_low.write(writer)?;
7862 // Version 1 deserializers expected to read parts of the config object here. Version 2
7863 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
7864 // `minimum_depth` we simply write dummy values here.
7865 writer.write_all(&[0; 8])?;
7867 self.context.channel_id.write(writer)?;
7869 let mut channel_state = self.context.channel_state;
7870 if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
7871 channel_state.set_peer_disconnected();
7873 debug_assert!(false, "Pre-funded/shutdown channels should not be written");
7875 channel_state.to_u32().write(writer)?;
7877 self.context.channel_value_satoshis.write(writer)?;
7879 self.context.latest_monitor_update_id.write(writer)?;
7881 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
7882 // deserialized from that format.
7883 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
7884 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
7885 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
7887 self.context.destination_script.write(writer)?;
7889 self.context.cur_holder_commitment_transaction_number.write(writer)?;
7890 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
7891 self.context.value_to_self_msat.write(writer)?;
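// Inbound HTLCs the counterparty announced but which were never committed (RemoteAnnounced) are skipped
// entirely; the peer will retransmit them on reconnect.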
7893 let mut dropped_inbound_htlcs = 0;
7894 for htlc in self.context.pending_inbound_htlcs.iter() {
7895 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
7896 dropped_inbound_htlcs += 1;
7899 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
7900 for htlc in self.context.pending_inbound_htlcs.iter() {
7901 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
7904 htlc.htlc_id.write(writer)?;
7905 htlc.amount_msat.write(writer)?;
7906 htlc.cltv_expiry.write(writer)?;
7907 htlc.payment_hash.write(writer)?;
7909 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
7910 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
7912 htlc_state.write(writer)?;
7914 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
7916 htlc_state.write(writer)?;
7918 &InboundHTLCState::Committed => {
7921 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
7923 removal_reason.write(writer)?;
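// Side-tables collected while walking the outbound HTLCs; these are written later as TLVs for
// backwards compatibility.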
7928 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
7929 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
7930 let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7932 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
7933 for htlc in self.context.pending_outbound_htlcs.iter() {
7934 htlc.htlc_id.write(writer)?;
7935 htlc.amount_msat.write(writer)?;
7936 htlc.cltv_expiry.write(writer)?;
7937 htlc.payment_hash.write(writer)?;
7938 htlc.source.write(writer)?;
7940 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
7942 onion_packet.write(writer)?;
7944 &OutboundHTLCState::Committed => {
7947 &OutboundHTLCState::RemoteRemoved(_) => {
7948 // Treat this as a Committed because we haven't received the CS - they'll
7949 // resend the claim/fail on reconnect as well as (hopefully) the missing CS.
7952 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
7954 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7955 preimages.push(preimage);
7957 let reason: Option<&HTLCFailReason> = outcome.into();
7958 reason.write(writer)?;
7960 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
7962 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7963 preimages.push(preimage);
7965 let reason: Option<&HTLCFailReason> = outcome.into();
7966 reason.write(writer)?;
7969 pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat);
7970 pending_outbound_blinding_points.push(htlc.blinding_point);
7973 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
7974 let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7975 // Vec of (htlc_id, failure_code, sha256_of_onion)
7976 let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new();
7977 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
7978 for update in self.context.holding_cell_htlc_updates.iter() {
7980 &HTLCUpdateAwaitingACK::AddHTLC {
7981 ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
7982 blinding_point, skimmed_fee_msat,
7985 amount_msat.write(writer)?;
7986 cltv_expiry.write(writer)?;
7987 payment_hash.write(writer)?;
7988 source.write(writer)?;
7989 onion_routing_packet.write(writer)?;
7991 holding_cell_skimmed_fees.push(skimmed_fee_msat);
7992 holding_cell_blinding_points.push(blinding_point);
7994 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
7996 payment_preimage.write(writer)?;
7997 htlc_id.write(writer)?;
7999 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
8001 htlc_id.write(writer)?;
8002 err_packet.write(writer)?;
8004 &HTLCUpdateAwaitingACK::FailMalformedHTLC {
8005 htlc_id, failure_code, sha256_of_onion
8007 // We don't want to break downgrading by adding a new variant, so write a dummy
8008 // `::FailHTLC` variant and write the real malformed error as an optional TLV.
8009 malformed_htlcs.push((htlc_id, failure_code, sha256_of_onion));
8011 let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
8013 htlc_id.write(writer)?;
8014 dummy_err_packet.write(writer)?;
8019 match self.context.resend_order {
8020 RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
8021 RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
8024 self.context.monitor_pending_channel_ready.write(writer)?;
8025 self.context.monitor_pending_revoke_and_ack.write(writer)?;
8026 self.context.monitor_pending_commitment_signed.write(writer)?;
8028 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
8029 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
8030 pending_forward.write(writer)?;
8031 htlc_id.write(writer)?;
8034 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
8035 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
8036 htlc_source.write(writer)?;
8037 payment_hash.write(writer)?;
8038 fail_reason.write(writer)?;
8041 if self.context.is_outbound() {
8042 self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
8043 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
8044 Some(feerate).write(writer)?;
8046 // As for inbound HTLCs, if the update was only announced and never committed in a
8047 // commitment_signed, drop it.
8048 None::<u32>.write(writer)?;
8050 self.context.holding_cell_update_fee.write(writer)?;
8052 self.context.next_holder_htlc_id.write(writer)?;
8053 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
8054 self.context.update_time_counter.write(writer)?;
8055 self.context.feerate_per_kw.write(writer)?;
8057 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
8058 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
8059 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
8060 // consider the stale state on reload.
8063 self.context.funding_tx_confirmed_in.write(writer)?;
8064 self.context.funding_tx_confirmation_height.write(writer)?;
8065 self.context.short_channel_id.write(writer)?;
8067 self.context.counterparty_dust_limit_satoshis.write(writer)?;
8068 self.context.holder_dust_limit_satoshis.write(writer)?;
8069 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
8071 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
8072 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
8074 self.context.counterparty_htlc_minimum_msat.write(writer)?;
8075 self.context.holder_htlc_minimum_msat.write(writer)?;
8076 self.context.counterparty_max_accepted_htlcs.write(writer)?;
8078 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
8079 self.context.minimum_depth.unwrap_or(0).write(writer)?;
8081 match &self.context.counterparty_forwarding_info {
8084 info.fee_base_msat.write(writer)?;
8085 info.fee_proportional_millionths.write(writer)?;
8086 info.cltv_expiry_delta.write(writer)?;
8088 None => 0u8.write(writer)?
8091 self.context.channel_transaction_parameters.write(writer)?;
8092 self.context.funding_transaction.write(writer)?;
8094 self.context.counterparty_cur_commitment_point.write(writer)?;
8095 self.context.counterparty_prev_commitment_point.write(writer)?;
8096 self.context.counterparty_node_id.write(writer)?;
8098 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
8100 self.context.commitment_secrets.write(writer)?;
8102 self.context.channel_update_status.write(writer)?;
8104 #[cfg(any(test, fuzzing))]
8105 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
8106 #[cfg(any(test, fuzzing))]
8107 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
8108 htlc.write(writer)?;
8111 // If the channel type is something other than only-static-remote-key, then we need to have
8112 // older clients fail to deserialize this channel at all. If the type is
8113 // only-static-remote-key, we simply consider it "default" and don't write the channel type out at all.
8115 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
8116 Some(&self.context.channel_type) } else { None };
8118 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
8119 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
8120 // a different percentage of the channel value than 10%, which older versions of LDK used
8121 // to set it to before the percentage was made configurable.
8122 let serialized_holder_selected_reserve =
8123 if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
8124 { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
8126 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
8127 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
8128 let serialized_holder_htlc_max_in_flight =
8129 if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
8130 { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
8132 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
8133 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
8135 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
8136 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
8137 // we write the high bytes as an option here.
8138 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
8140 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
8142 write_tlv_fields!(writer, {
8143 (0, self.context.announcement_sigs, option),
8144 // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
8145 // default value instead of being Option<>al. Thus, to maintain compatibility we write
8146 // them twice, once with their original default values above, and once as an option
8147 // here. On the read side, old versions will simply ignore the odd-type entries here,
8148 // and new versions map the default values to None and allow the TLV entries here to override them.
8150 (1, self.context.minimum_depth, option),
8151 (2, chan_type, option),
8152 (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
8153 (4, serialized_holder_selected_reserve, option),
8154 (5, self.context.config, required),
8155 (6, serialized_holder_htlc_max_in_flight, option),
8156 (7, self.context.shutdown_scriptpubkey, option),
8157 (8, self.context.blocked_monitor_updates, optional_vec),
8158 (9, self.context.target_closing_feerate_sats_per_kw, option),
8159 (11, self.context.monitor_pending_finalized_fulfills, required_vec),
8160 (13, self.context.channel_creation_height, required),
8161 (15, preimages, required_vec),
8162 (17, self.context.announcement_sigs_state, required),
8163 (19, self.context.latest_inbound_scid_alias, option),
8164 (21, self.context.outbound_scid_alias, required),
8165 (23, channel_ready_event_emitted, option),
8166 (25, user_id_high_opt, option),
8167 (27, self.context.channel_keys_id, required),
8168 (28, holder_max_accepted_htlcs, option),
8169 (29, self.context.temporary_channel_id, option),
8170 (31, channel_pending_event_emitted, option),
8171 (35, pending_outbound_skimmed_fees, optional_vec),
8172 (37, holding_cell_skimmed_fees, optional_vec),
8173 (38, self.context.is_batch_funding, option),
8174 (39, pending_outbound_blinding_points, optional_vec),
8175 (41, holding_cell_blinding_points, optional_vec),
8176 (43, malformed_htlcs, optional_vec), // Added in 0.0.119
8177 (45, self.context.local_initiated_shutdown, option), // Added in 0.0.122
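// Upper bound on any single up-front buffer allocation while deserializing, protecting against corrupted
// or malicious length prefixes.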
8184 const MAX_ALLOC_SIZE: usize = 64*1024;
8185 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
8187 ES::Target: EntropySource,
8188 SP::Target: SignerProvider
8190 fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
8191 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
8192 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
8194 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
8195 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
8196 // the low bytes now and the high bytes later.
8197 let user_id_low: u64 = Readable::read(reader)?;
8199 let mut config = Some(LegacyChannelConfig::default());
8201 // Read the old serialization of the ChannelConfig from version 0.0.98.
8202 config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
8203 config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
8204 config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
8205 config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
8207 // Read the 8 bytes of backwards-compatibility ChannelConfig data.
8208 let mut _val: u64 = Readable::read(reader)?;
8211 let channel_id = Readable::read(reader)?;
8212 let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?;
8213 let channel_value_satoshis = Readable::read(reader)?;
8215 let latest_monitor_update_id = Readable::read(reader)?;
8217 let mut keys_data = None;
8219 // Read the serialized signer bytes. We'll choose to deserialize them or not based on whether
8220 // the `channel_keys_id` TLV is present below.
8221 let keys_len: u32 = Readable::read(reader)?;
8222 keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
8223 while keys_data.as_ref().unwrap().len() != keys_len as usize {
8224 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
8225 let mut data = [0; 1024];
8226 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
8227 reader.read_exact(read_slice)?;
8228 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
8232 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
8233 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
8234 Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
8237 let destination_script = Readable::read(reader)?;
8239 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
8240 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
8241 let value_to_self_msat = Readable::read(reader)?;
8243 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
8245 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
8246 for _ in 0..pending_inbound_htlc_count {
8247 pending_inbound_htlcs.push(InboundHTLCOutput {
8248 htlc_id: Readable::read(reader)?,
8249 amount_msat: Readable::read(reader)?,
8250 cltv_expiry: Readable::read(reader)?,
8251 payment_hash: Readable::read(reader)?,
8252 state: match <u8 as Readable>::read(reader)? {
8253 1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
8254 2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
8255 3 => InboundHTLCState::Committed,
8256 4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
8257 _ => return Err(DecodeError::InvalidValue),
8262 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
8263 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
8264 for _ in 0..pending_outbound_htlc_count {
8265 pending_outbound_htlcs.push(OutboundHTLCOutput {
8266 htlc_id: Readable::read(reader)?,
8267 amount_msat: Readable::read(reader)?,
8268 cltv_expiry: Readable::read(reader)?,
8269 payment_hash: Readable::read(reader)?,
8270 source: Readable::read(reader)?,
8271 state: match <u8 as Readable>::read(reader)? {
8272 0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
8273 1 => OutboundHTLCState::Committed,
8275 let option: Option<HTLCFailReason> = Readable::read(reader)?;
8276 OutboundHTLCState::RemoteRemoved(option.into())
8279 let option: Option<HTLCFailReason> = Readable::read(reader)?;
8280 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
8283 let option: Option<HTLCFailReason> = Readable::read(reader)?;
8284 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
8286 _ => return Err(DecodeError::InvalidValue),
8288 skimmed_fee_msat: None,
8289 blinding_point: None,
8293 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
8294 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
8295 for _ in 0..holding_cell_htlc_update_count {
8296 holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
8297 0 => HTLCUpdateAwaitingACK::AddHTLC {
8298 amount_msat: Readable::read(reader)?,
8299 cltv_expiry: Readable::read(reader)?,
8300 payment_hash: Readable::read(reader)?,
8301 source: Readable::read(reader)?,
8302 onion_routing_packet: Readable::read(reader)?,
8303 skimmed_fee_msat: None,
8304 blinding_point: None,
8306 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
8307 payment_preimage: Readable::read(reader)?,
8308 htlc_id: Readable::read(reader)?,
8310 2 => HTLCUpdateAwaitingACK::FailHTLC {
8311 htlc_id: Readable::read(reader)?,
8312 err_packet: Readable::read(reader)?,
8314 _ => return Err(DecodeError::InvalidValue),
8318 let resend_order = match <u8 as Readable>::read(reader)? {
8319 0 => RAACommitmentOrder::CommitmentFirst,
8320 1 => RAACommitmentOrder::RevokeAndACKFirst,
8321 _ => return Err(DecodeError::InvalidValue),
8324 let monitor_pending_channel_ready = Readable::read(reader)?;
8325 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
8326 let monitor_pending_commitment_signed = Readable::read(reader)?;
8328 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
8329 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
8330 for _ in 0..monitor_pending_forwards_count {
8331 monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
8334 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
8335 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
8336 for _ in 0..monitor_pending_failures_count {
8337 monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
8340 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
8342 let holding_cell_update_fee = Readable::read(reader)?;
8344 let next_holder_htlc_id = Readable::read(reader)?;
8345 let next_counterparty_htlc_id = Readable::read(reader)?;
8346 let update_time_counter = Readable::read(reader)?;
8347 let feerate_per_kw = Readable::read(reader)?;
8349 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
8350 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
8351 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
8352 // consider the stale state on reload.
8353 match <u8 as Readable>::read(reader)? {
8356 let _: u32 = Readable::read(reader)?;
8357 let _: u64 = Readable::read(reader)?;
8358 let _: Signature = Readable::read(reader)?;
8360 _ => return Err(DecodeError::InvalidValue),
8363 let funding_tx_confirmed_in = Readable::read(reader)?;
8364 let funding_tx_confirmation_height = Readable::read(reader)?;
8365 let short_channel_id = Readable::read(reader)?;
8367 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
8368 let holder_dust_limit_satoshis = Readable::read(reader)?;
8369 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
8370 let mut counterparty_selected_channel_reserve_satoshis = None;
8372 // Read the old serialization from version 0.0.98.
8373 counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
8375 // Read the 8 bytes of backwards-compatibility data.
8376 let _dummy: u64 = Readable::read(reader)?;
8378 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
8379 let holder_htlc_minimum_msat = Readable::read(reader)?;
8380 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
8382 let mut minimum_depth = None;
8384 // Read the old serialization from version 0.0.98.
8385 minimum_depth = Some(Readable::read(reader)?);
8387 // Read the 4 bytes of backwards-compatibility data.
8388 let _dummy: u32 = Readable::read(reader)?;
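// Both values read above are only the legacy fixed-format encoding; for channels written by
// newer versions they are dummies, and the real `counterparty_selected_channel_reserve_satoshis`
// and `minimum_depth` arrive via TLV types 3 and 1 below and overwrite them.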
8391 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
8393 1 => Some(CounterpartyForwardingInfo {
8394 fee_base_msat: Readable::read(reader)?,
8395 fee_proportional_millionths: Readable::read(reader)?,
8396 cltv_expiry_delta: Readable::read(reader)?,
8398 _ => return Err(DecodeError::InvalidValue),
8401 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
8402 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
8404 let counterparty_cur_commitment_point = Readable::read(reader)?;
8406 let counterparty_prev_commitment_point = Readable::read(reader)?;
8407 let counterparty_node_id = Readable::read(reader)?;
8409 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
8410 let commitment_secrets = Readable::read(reader)?;
8412 let channel_update_status = Readable::read(reader)?;
8414 #[cfg(any(test, fuzzing))]
8415 let mut historical_inbound_htlc_fulfills = new_hash_set();
8416 #[cfg(any(test, fuzzing))]
8418 let htlc_fulfills_len: u64 = Readable::read(reader)?;
8419 for _ in 0..htlc_fulfills_len {
8420 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
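// The legacy format stored only the pending feerate, not which side the fee update came
// from. Since only the channel funder ever sends `update_fee`, the state can be rebuilt
// from `is_outbound_from_holder`: it is our own update if we funded the channel, and one
// received from the counterparty otherwise.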
8424 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
8425 Some((feerate, if channel_parameters.is_outbound_from_holder {
8426 FeeUpdateState::Outbound
8428 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
8434 let mut announcement_sigs = None;
8435 let mut target_closing_feerate_sats_per_kw = None;
8436 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
8437 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
8438 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
8439 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
8440 // only, so we default to that if none was written.
8441 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
8442 let mut channel_creation_height = Some(serialized_height);
8443 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
8445 // If we read an old Channel, for simplicity we just treat it as "we never sent an
8446 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
8447 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
8448 let mut latest_inbound_scid_alias = None;
8449 let mut outbound_scid_alias = None;
8450 let mut channel_pending_event_emitted = None;
8451 let mut channel_ready_event_emitted = None;
8453 let mut user_id_high_opt: Option<u64> = None;
8454 let mut channel_keys_id: Option<[u8; 32]> = None;
8455 let mut temporary_channel_id: Option<ChannelId> = None;
8456 let mut holder_max_accepted_htlcs: Option<u16> = None;
8458 let mut blocked_monitor_updates = Some(Vec::new());
8460 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
8461 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
8463 let mut is_batch_funding: Option<()> = None;
8465 let mut local_initiated_shutdown: Option<()> = None;
8467 let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
8468 let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
8470 let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
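// Everything below lives in an extensible TLV stream. Following the usual TLV convention,
// unknown odd types are skipped on read while unknown even types cause a decode failure,
// so new optional fields get odd numbers and must tolerate being absent.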
8472 read_tlv_fields!(reader, {
8473 (0, announcement_sigs, option),
8474 (1, minimum_depth, option),
8475 (2, channel_type, option),
8476 (3, counterparty_selected_channel_reserve_satoshis, option),
8477 (4, holder_selected_channel_reserve_satoshis, option),
8478 (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
8479 (6, holder_max_htlc_value_in_flight_msat, option),
8480 (7, shutdown_scriptpubkey, option),
8481 (8, blocked_monitor_updates, optional_vec),
8482 (9, target_closing_feerate_sats_per_kw, option),
8483 (11, monitor_pending_finalized_fulfills, optional_vec),
8484 (13, channel_creation_height, option),
8485 (15, preimages_opt, optional_vec),
8486 (17, announcement_sigs_state, option),
8487 (19, latest_inbound_scid_alias, option),
8488 (21, outbound_scid_alias, option),
8489 (23, channel_ready_event_emitted, option),
8490 (25, user_id_high_opt, option),
8491 (27, channel_keys_id, option),
8492 (28, holder_max_accepted_htlcs, option),
8493 (29, temporary_channel_id, option),
8494 (31, channel_pending_event_emitted, option),
8495 (35, pending_outbound_skimmed_fees_opt, optional_vec),
8496 (37, holding_cell_skimmed_fees_opt, optional_vec),
8497 (38, is_batch_funding, option),
8498 (39, pending_outbound_blinding_points_opt, optional_vec),
8499 (41, holding_cell_blinding_points_opt, optional_vec),
8500 (43, malformed_htlcs, optional_vec), // Added in 0.0.119
8501 (45, local_initiated_shutdown, option),
8504 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
8505 let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
8506 // If we've gotten to the funding stage of the channel, populate the signer with its
8507 // required channel parameters.
8508 if channel_state >= ChannelState::FundingNegotiated {
8509 holder_signer.provide_channel_parameters(&channel_parameters);
8511 (channel_keys_id, holder_signer)
8513 // `keys_data` can be `None` if we had corrupted data.
8514 let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
8515 let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
8516 (holder_signer.channel_keys_id(), holder_signer)
8519 if let Some(preimages) = preimages_opt {
8520 let mut iter = preimages.into_iter();
8521 for htlc in pending_outbound_htlcs.iter_mut() {
8523 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
8524 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
8526 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
8527 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
8532 // We expect all preimages to be consumed above
8533 if iter.next().is_some() {
8534 return Err(DecodeError::InvalidValue);
8538 let chan_features = channel_type.as_ref().unwrap();
8539 if !chan_features.is_subset(our_supported_features) {
8540 // If the channel was written by a new version and negotiated with features we don't
8541 // understand yet, refuse to read it.
8542 return Err(DecodeError::UnknownRequiredFeature);
8545 // ChannelTransactionParameters may have had an empty features set upon deserialization.
8546 // To account for that, we're proactively setting/overriding the field here.
8547 channel_parameters.channel_type_features = chan_features.clone();
8549 let mut secp_ctx = Secp256k1::new();
8550 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
8552 // `user_id` used to be a single u64 value. In order to remain backwards
8553 // compatible with versions prior to 0.0.113, the u128 is serialized as two
8554 // separate u64 values.
8555 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
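// e.g. a `user_id` of 3u128 + (7u128 << 64) round-trips as `user_id_low == 3` and
// `user_id_high_opt == Some(7)`; a pre-0.0.113 writer only produces the low half.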
8557 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
8559 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
8560 let mut iter = skimmed_fees.into_iter();
8561 for htlc in pending_outbound_htlcs.iter_mut() {
8562 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8564 // We expect all skimmed fees to be consumed above
8565 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8567 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
8568 let mut iter = skimmed_fees.into_iter();
8569 for htlc in holding_cell_htlc_updates.iter_mut() {
8570 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
8571 *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8574 // We expect all skimmed fees to be consumed above
8575 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8577 if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
8578 let mut iter = blinding_pts.into_iter();
8579 for htlc in pending_outbound_htlcs.iter_mut() {
8580 htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8582 // We expect all blinding points to be consumed above
8583 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8585 if let Some(blinding_pts) = holding_cell_blinding_points_opt {
8586 let mut iter = blinding_pts.into_iter();
8587 for htlc in holding_cell_htlc_updates.iter_mut() {
8588 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
8589 *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8592 // We expect all blinding points to be consumed above
8593 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
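// The skimmed-fee and blinding-point vectors above are serialized separately from the HTLC
// lists but are index-aligned with them (one entry per HTLC), which is why each iterator
// must be consumed exactly; any length mismatch means the serialized data is inconsistent.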
8596 if let Some(malformed_htlcs) = malformed_htlcs {
8597 for (malformed_htlc_id, failure_code, sha256_of_onion) in malformed_htlcs {
8598 let htlc_idx = holding_cell_htlc_updates.iter().position(|htlc| {
8599 if let HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet } = htlc {
8600 let matches = *htlc_id == malformed_htlc_id;
8601 if matches { debug_assert!(err_packet.data.is_empty()) }
8604 }).ok_or(DecodeError::InvalidValue)?;
8605 let malformed_htlc = HTLCUpdateAwaitingACK::FailMalformedHTLC {
8606 htlc_id: malformed_htlc_id, failure_code, sha256_of_onion
8608 let _ = core::mem::replace(&mut holding_cell_htlc_updates[htlc_idx], malformed_htlc);
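// Malformed holding-cell HTLCs are written as `FailHTLC` entries with an empty `err_packet`
// (so the fixed-format list stays parseable by older readers) plus their
// (htlc_id, failure_code, sha256_of_onion) triples in odd TLV type 43; here we match the
// triples back up and swap in the real `FailMalformedHTLC` variant.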
8613 context: ChannelContext {
8616 config: config.unwrap(),
8620 // Note that we don't care about serializing handshake limits as we only ever serialize
8621 // channel data after the handshake has completed.
8622 inbound_handshake_limits_override: None,
8625 temporary_channel_id,
8627 announcement_sigs_state: announcement_sigs_state.unwrap(),
8629 channel_value_satoshis,
8631 latest_monitor_update_id,
8633 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
8634 shutdown_scriptpubkey,
8637 cur_holder_commitment_transaction_number,
8638 cur_counterparty_commitment_transaction_number,
8641 holder_max_accepted_htlcs,
8642 pending_inbound_htlcs,
8643 pending_outbound_htlcs,
8644 holding_cell_htlc_updates,
8648 monitor_pending_channel_ready,
8649 monitor_pending_revoke_and_ack,
8650 monitor_pending_commitment_signed,
8651 monitor_pending_forwards,
8652 monitor_pending_failures,
8653 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
8655 signer_pending_commitment_update: false,
8656 signer_pending_funding: false,
8659 holding_cell_update_fee,
8660 next_holder_htlc_id,
8661 next_counterparty_htlc_id,
8662 update_time_counter,
8665 #[cfg(debug_assertions)]
8666 holder_max_commitment_tx_output: Mutex::new((0, 0)),
8667 #[cfg(debug_assertions)]
8668 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
8670 last_sent_closing_fee: None,
8671 pending_counterparty_closing_signed: None,
8672 expecting_peer_commitment_signed: false,
8673 closing_fee_limits: None,
8674 target_closing_feerate_sats_per_kw,
8676 funding_tx_confirmed_in,
8677 funding_tx_confirmation_height,
8679 channel_creation_height: channel_creation_height.unwrap(),
8681 counterparty_dust_limit_satoshis,
8682 holder_dust_limit_satoshis,
8683 counterparty_max_htlc_value_in_flight_msat,
8684 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
8685 counterparty_selected_channel_reserve_satoshis,
8686 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
8687 counterparty_htlc_minimum_msat,
8688 holder_htlc_minimum_msat,
8689 counterparty_max_accepted_htlcs,
8692 counterparty_forwarding_info,
8694 channel_transaction_parameters: channel_parameters,
8695 funding_transaction,
8698 counterparty_cur_commitment_point,
8699 counterparty_prev_commitment_point,
8700 counterparty_node_id,
8702 counterparty_shutdown_scriptpubkey,
8706 channel_update_status,
8707 closing_signed_in_flight: false,
8711 #[cfg(any(test, fuzzing))]
8712 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
8713 #[cfg(any(test, fuzzing))]
8714 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
8716 workaround_lnd_bug_4006: None,
8717 sent_message_awaiting_response: None,
8719 latest_inbound_scid_alias,
8720 // Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if it's missing
8721 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
8723 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
8724 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
8726 #[cfg(any(test, fuzzing))]
8727 historical_inbound_htlc_fulfills,
8729 channel_type: channel_type.unwrap(),
8732 local_initiated_shutdown,
8734 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
8743 use bitcoin::blockdata::constants::ChainHash;
8744 use bitcoin::blockdata::script::{ScriptBuf, Builder};
8745 use bitcoin::blockdata::transaction::{Transaction, TxOut};
8746 use bitcoin::blockdata::opcodes;
8747 use bitcoin::network::constants::Network;
8748 use crate::ln::onion_utils::INVALID_ONION_BLINDING;
8749 use crate::ln::{PaymentHash, PaymentPreimage};
8750 use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
8751 use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
8752 use crate::ln::channel::InitFeatures;
8753 use crate::ln::channel::{AwaitingChannelReadyFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
8754 use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
8755 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
8756 use crate::ln::msgs;
8757 use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
8758 use crate::ln::script::ShutdownScript;
8759 use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
8760 use crate::chain::BestBlock;
8761 use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
8762 use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
8763 use crate::chain::transaction::OutPoint;
8764 use crate::routing::router::{Path, RouteHop};
8765 use crate::util::config::UserConfig;
8766 use crate::util::errors::APIError;
8767 use crate::util::ser::{ReadableArgs, Writeable};
8768 use crate::util::test_utils;
8769 use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
8770 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
8771 use bitcoin::secp256k1::ffi::Signature as FFISignature;
8772 use bitcoin::secp256k1::{SecretKey,PublicKey};
8773 use bitcoin::hashes::sha256::Hash as Sha256;
8774 use bitcoin::hashes::Hash;
8775 use bitcoin::hashes::hex::FromHex;
8776 use bitcoin::hash_types::WPubkeyHash;
8777 use bitcoin::blockdata::locktime::absolute::LockTime;
8778 use bitcoin::address::{WitnessProgram, WitnessVersion};
8779 use crate::prelude::*;
8782 fn test_channel_state_order() {
8783 use crate::ln::channel::NegotiatingFundingFlags;
8784 use crate::ln::channel::AwaitingChannelReadyFlags;
8785 use crate::ln::channel::ChannelReadyFlags;
8787 assert!(ChannelState::NegotiatingFunding(NegotiatingFundingFlags::new()) < ChannelState::FundingNegotiated);
8788 assert!(ChannelState::FundingNegotiated < ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()));
8789 assert!(ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()) < ChannelState::ChannelReady(ChannelReadyFlags::new()));
8790 assert!(ChannelState::ChannelReady(ChannelReadyFlags::new()) < ChannelState::ShutdownComplete);
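// The deserialization path above relies on this ordering, e.g. the
// `channel_state >= ChannelState::FundingNegotiated` check before the signer is handed the
// channel parameters.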
8793 struct TestFeeEstimator {
8796 impl FeeEstimator for TestFeeEstimator {
8797 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
8803 fn test_max_funding_satoshis_no_wumbo() {
8804 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
8805 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
8806 "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
8810 signer: InMemorySigner,
8813 impl EntropySource for Keys {
8814 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
8817 impl SignerProvider for Keys {
8818 type EcdsaSigner = InMemorySigner;
8820 type TaprootSigner = InMemorySigner;
8822 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
8823 self.signer.channel_keys_id()
8826 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
8830 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
8832 fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
8833 let secp_ctx = Secp256k1::signing_only();
8834 let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8835 let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
8836 Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
8839 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
8840 let secp_ctx = Secp256k1::signing_only();
8841 let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8842 Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
8846 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
8847 fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
8848 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
8852 fn upfront_shutdown_script_incompatibility() {
8853 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
8854 let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
8855 &WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
8858 let seed = [42; 32];
8859 let network = Network::Testnet;
8860 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8861 keys_provider.expect(OnGetShutdownScriptpubkey {
8862 returns: non_v0_segwit_shutdown_script.clone(),
8865 let secp_ctx = Secp256k1::new();
8866 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8867 let config = UserConfig::default();
8868 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
8869 Err(APIError::IncompatibleShutdownScript { script }) => {
8870 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
8872 Err(e) => panic!("Unexpected error: {:?}", e),
8873 Ok(_) => panic!("Expected error"),
8877 // Check that, during channel creation, we use the same feerate in the open channel message
8878 // as we do in the Channel object creation itself.
8880 fn test_open_channel_msg_fee() {
8881 let original_fee = 253;
8882 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
8883 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
8884 let secp_ctx = Secp256k1::new();
8885 let seed = [42; 32];
8886 let network = Network::Testnet;
8887 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8889 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8890 let config = UserConfig::default();
8891 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8893 // Now change the fee so we can check that the fee in the open_channel message is the
8894 // same as the old fee.
8895 fee_est.fee_est = 500;
8896 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8897 assert_eq!(open_channel_msg.common_fields.commitment_feerate_sat_per_1000_weight, original_fee);
8901 fn test_holder_vs_counterparty_dust_limit() {
8902 // Test that when calculating the local and remote commitment transaction fees, the correct
8903 // dust limits are used.
8904 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8905 let secp_ctx = Secp256k1::new();
8906 let seed = [42; 32];
8907 let network = Network::Testnet;
8908 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8909 let logger = test_utils::TestLogger::new();
8910 let best_block = BestBlock::from_network(network);
8912 // Go through the flow of opening a channel between two nodes, making sure
8913 // they have different dust limits.
8915 // Create Node A's channel pointing to Node B's pubkey
8916 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8917 let config = UserConfig::default();
8918 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8920 // Create Node B's channel by receiving Node A's open_channel message
8921 // Make sure A's dust limit is as we expect.
8922 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8923 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8924 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8926 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8927 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8928 accept_channel_msg.common_fields.dust_limit_satoshis = 546;
8929 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8930 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8932 // Node A --> Node B: funding created
8933 let output_script = node_a_chan.context.get_funding_redeemscript();
8934 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8935 value: 10000000, script_pubkey: output_script.clone(),
8937 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8938 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8939 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8941 // Node B --> Node A: funding signed
8942 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
8943 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
8945 // Put some inbound and outbound HTLCs in A's channel.
8946 let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
8947 node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
8949 amount_msat: htlc_amount_msat,
8950 payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
8951 cltv_expiry: 300000000,
8952 state: InboundHTLCState::Committed,
8955 node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
8957 amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
8958 payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
8959 cltv_expiry: 200000000,
8960 state: OutboundHTLCState::Committed,
8961 source: HTLCSource::OutboundRoute {
8962 path: Path { hops: Vec::new(), blinded_tail: None },
8963 session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8964 first_hop_htlc_msat: 548,
8965 payment_id: PaymentId([42; 32]),
8967 skimmed_fee_msat: None,
8968 blinding_point: None,
8971 // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
8972 // the dust limit check.
8973 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8974 let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8975 let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
8976 assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
8978 // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
8979 // of the HTLCs are seen to be above the dust limit.
8980 node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8981 let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
8982 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8983 let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8984 assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
8988 fn test_timeout_vs_success_htlc_dust_limit() {
8989 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
8990 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
8991 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
8992 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
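// Roughly, an HTLC is dust on a non-anchor commitment if its value cannot cover the dust
// limit plus the fee of its second-stage transaction:
//   threshold_sats = dust_limit_sats + feerate_per_kw * htlc_tx_weight / 1000
// e.g. at 253 sat/kW an HTLC-success transaction (703 weight) costs 253 * 703 / 1000 = 177 sats,
// while an HTLC-timeout transaction (663 weight) costs 167 sats.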
8993 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
8994 let secp_ctx = Secp256k1::new();
8995 let seed = [42; 32];
8996 let network = Network::Testnet;
8997 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8999 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9000 let config = UserConfig::default();
9001 let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9003 let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
9004 let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
9006 // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
9007 // counted as dust when it shouldn't be.
9008 let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
9009 let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
9010 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
9011 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
9013 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
9014 let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
9015 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
9016 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
9017 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
9019 chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
9021 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
9022 let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
9023 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
9024 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
9025 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
9027 // If swapped: this HTLC would be counted as dust when it shouldn't be.
9028 let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
9029 let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
9030 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
9031 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
9035 fn channel_reestablish_no_updates() {
9036 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9037 let logger = test_utils::TestLogger::new();
9038 let secp_ctx = Secp256k1::new();
9039 let seed = [42; 32];
9040 let network = Network::Testnet;
9041 let best_block = BestBlock::from_network(network);
9042 let chain_hash = ChainHash::using_genesis_block(network);
9043 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9045 // Go through the flow of opening a channel between two nodes.
9047 // Create Node A's channel pointing to Node B's pubkey
9048 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9049 let config = UserConfig::default();
9050 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9052 // Create Node B's channel by receiving Node A's open_channel message
9053 let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
9054 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9055 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
9057 // Node B --> Node A: accept channel
9058 let accept_channel_msg = node_b_chan.accept_inbound_channel();
9059 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
9061 // Node A --> Node B: funding created
9062 let output_script = node_a_chan.context.get_funding_redeemscript();
9063 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
9064 value: 10000000, script_pubkey: output_script.clone(),
9066 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9067 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
9068 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
9070 // Node B --> Node A: funding signed
9071 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
9072 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
9074 // Now disconnect the two nodes and check that the commitment point in
9075 // Node B's channel_reestablish message is sane.
9076 assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
9077 let msg = node_b_chan.get_channel_reestablish(&&logger);
9078 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
9079 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
9080 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
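// Per BOLT 2, right after the initial commitment exchange and before any updates the
// reestablish message carries next_commitment_number == 1, next_revocation_number == 0, and
// (with no revocations received yet) an all-zero your_last_per_commitment_secret.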
9082 // Check that the commitment point in Node A's channel_reestablish message is sane.
9084 assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
9085 let msg = node_a_chan.get_channel_reestablish(&&logger);
9086 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
9087 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
9088 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
9092 fn test_configured_holder_max_htlc_value_in_flight() {
9093 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9094 let logger = test_utils::TestLogger::new();
9095 let secp_ctx = Secp256k1::new();
9096 let seed = [42; 32];
9097 let network = Network::Testnet;
9098 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9099 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9100 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9102 let mut config_2_percent = UserConfig::default();
9103 config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
9104 let mut config_99_percent = UserConfig::default();
9105 config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
9106 let mut config_0_percent = UserConfig::default();
9107 config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
9108 let mut config_101_percent = UserConfig::default();
9109 config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
9111 // Test that `OutboundV1Channel::new` creates a channel with the correct value for
9112 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
9113 // which is set to the lower bound + 1 (2%) of the `channel_value`.
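// e.g. for the 10_000_000 sat channel below, channel_value_msat is 10_000_000_000 and the
// expected in-flight cap at 2% is 200_000_000 msat.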
9114 let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
9115 let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
9116 assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
9118 // Test with the upper bound - 1 of valid values (99%).
9119 let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
9120 let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
9121 assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
9123 let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
9125 // Test that `InboundV1Channel::new` creates a channel with the correct value for
9126 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
9127 // which is set to the lower bound + 1 (2%) of the `channel_value`.
9128 let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9129 let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
9130 assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
9132 // Test with the upper bound - 1 of valid values (99%).
9133 let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9134 let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
9135 assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
9137 // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
9138 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
9139 let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
9140 let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
9141 assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
9143 // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
9144 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value than 100.
9146 let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
9147 let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
9148 assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
9150 // Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
9151 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
9152 let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9153 let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
9154 assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
9156 // Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
9157 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value than 100.
9159 let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9160 let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
9161 assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
9165 fn test_configured_holder_selected_channel_reserve_satoshis() {
9167 // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
9168 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
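// The helper takes (channel_value_satoshis, outbound reserve fraction, inbound reserve
// fraction); each fraction becomes `their_channel_reserve_proportional_millionths`
// (e.g. 0.02 -> 20_000 ppm, i.e. a 200_000 sat reserve on a 10_000_000 sat channel), floored
// at MIN_THEIR_CHAN_RESERVE_SATOSHIS.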
9169 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
9171 // Test with valid but unreasonably high channel reserves
9172 // Requesting and accepting parties have requested 49%-49% and 60%-30% channel reserves
9173 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
9174 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
9176 // Test with calculated channel reserve less than lower bound
9177 // i.e `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
9178 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
9180 // Test with invalid channel reserves since sum of both is greater than or equal to the channel value
9182 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
9183 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
9186 fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
9187 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
9188 let logger = test_utils::TestLogger::new();
9189 let secp_ctx = Secp256k1::new();
9190 let seed = [42; 32];
9191 let network = Network::Testnet;
9192 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9193 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9194 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9197 let mut outbound_node_config = UserConfig::default();
9198 outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
9199 let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();
9201 let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
9202 assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
9204 let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
9205 let mut inbound_node_config = UserConfig::default();
9206 inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
9208 if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
9209 let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();
9211 let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
9213 assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
9214 assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
9216 // Channel negotiation should fail
9217 let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
9218 assert!(result.is_err());
9223 fn channel_update() {
9224 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9225 let logger = test_utils::TestLogger::new();
9226 let secp_ctx = Secp256k1::new();
9227 let seed = [42; 32];
9228 let network = Network::Testnet;
9229 let best_block = BestBlock::from_network(network);
9230 let chain_hash = ChainHash::using_genesis_block(network);
9231 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9233 // Create Node A's channel pointing to Node B's pubkey
9234 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9235 let config = UserConfig::default();
9236 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9238 // Create Node B's channel by receiving Node A's open_channel message
9239 // Make sure A's dust limit is as we expect.
9240 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9241 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9242 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
9244 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
9245 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
9246 accept_channel_msg.common_fields.dust_limit_satoshis = 546;
9247 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
9248 node_a_chan.context.holder_dust_limit_satoshis = 1560;
9250 // Node A --> Node B: funding created
9251 let output_script = node_a_chan.context.get_funding_redeemscript();
9252 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
9253 value: 10000000, script_pubkey: output_script.clone(),
9255 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9256 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
9257 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
9259 // Node B --> Node A: funding signed
9260 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
9261 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
9263 // Make sure that receiving a channel update will update the Channel as expected.
9264 let update = ChannelUpdate {
9265 contents: UnsignedChannelUpdate {
9267 short_channel_id: 0,
9270 cltv_expiry_delta: 100,
9271 htlc_minimum_msat: 5,
9272 htlc_maximum_msat: MAX_VALUE_MSAT,
9274 fee_proportional_millionths: 11,
9275 excess_data: Vec::new(),
9277 signature: Signature::from(unsafe { FFISignature::new() })
9279 assert!(node_a_chan.channel_update(&update).unwrap());
9281 // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
9282 // change our official htlc_minimum_msat.
9283 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
9284 match node_a_chan.context.counterparty_forwarding_info() {
9286 assert_eq!(info.cltv_expiry_delta, 100);
9287 assert_eq!(info.fee_base_msat, 110);
9288 assert_eq!(info.fee_proportional_millionths, 11);
9290 None => panic!("expected counterparty forwarding info to be Some")
9293 assert!(!node_a_chan.channel_update(&update).unwrap());
9297 fn blinding_point_skimmed_fee_malformed_ser() {
9298 // Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized properly.
9300 let logger = test_utils::TestLogger::new();
9301 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9302 let secp_ctx = Secp256k1::new();
9303 let seed = [42; 32];
9304 let network = Network::Testnet;
9305 let best_block = BestBlock::from_network(network);
9306 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9308 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9309 let config = UserConfig::default();
9310 let features = channelmanager::provided_init_features(&config);
9311 let mut outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(
9312 &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None
9314 let inbound_chan = InboundV1Channel::<&TestKeysInterface>::new(
9315 &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config),
9316 &features, &outbound_chan.get_open_channel(ChainHash::using_genesis_block(network)), 7, &config, 0, &&logger, false
9318 outbound_chan.accept_channel(&inbound_chan.get_accept_channel_message(), &config.channel_handshake_limits, &features).unwrap();
9319 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
9320 value: 10000000, script_pubkey: outbound_chan.context.get_funding_redeemscript(),
9322 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9323 let funding_created = outbound_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap().unwrap();
9324 let mut chan = match inbound_chan.funding_created(&funding_created, best_block, &&keys_provider, &&logger) {
9325 Ok((chan, _, _)) => chan,
9326 Err((_, e)) => panic!("{}", e),
9329 let dummy_htlc_source = HTLCSource::OutboundRoute {
9331 hops: vec![RouteHop {
9332 pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
9333 node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
9334 cltv_expiry_delta: 0, maybe_announced_channel: false,
9338 session_priv: test_utils::privkey(42),
9339 first_hop_htlc_msat: 0,
9340 payment_id: PaymentId([42; 32]),
9342 let dummy_outbound_output = OutboundHTLCOutput {
9345 payment_hash: PaymentHash([43; 32]),
9347 state: OutboundHTLCState::Committed,
9348 source: dummy_htlc_source.clone(),
9349 skimmed_fee_msat: None,
9350 blinding_point: None,
9352 let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
9353 for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
9355 htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
9358 htlc.skimmed_fee_msat = Some(1);
9361 chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();
9363 let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
9366 payment_hash: PaymentHash([43; 32]),
9367 source: dummy_htlc_source.clone(),
9368 onion_routing_packet: msgs::OnionPacket {
9370 public_key: Ok(test_utils::pubkey(1)),
9371 hop_data: [0; 20*65],
9374 skimmed_fee_msat: None,
9375 blinding_point: None,
9377 let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
9378 payment_preimage: PaymentPreimage([42; 32]),
9381 let dummy_holding_cell_failed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailHTLC {
9382 htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }
9384 let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC {
9385 htlc_id, failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32],
9387 let mut holding_cell_htlc_updates = Vec::with_capacity(12);
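// Cycle through all five holding-cell variants (plain add, claim, add with blinding point and
// skimmed fee, fail-malformed, fail) so that every (de)serialization path is exercised.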
9390 holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
9391 } else if i % 5 == 1 {
9392 holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
9393 } else if i % 5 == 2 {
9394 let mut dummy_add = dummy_holding_cell_add_htlc.clone();
9395 if let HTLCUpdateAwaitingACK::AddHTLC {
9396 ref mut blinding_point, ref mut skimmed_fee_msat, ..
9397 } = &mut dummy_add {
9398 *blinding_point = Some(test_utils::pubkey(42 + i));
9399 *skimmed_fee_msat = Some(42);
9401 holding_cell_htlc_updates.push(dummy_add);
9402 } else if i % 5 == 3 {
9403 holding_cell_htlc_updates.push(dummy_holding_cell_malformed_htlc(i as u64));
9405 holding_cell_htlc_updates.push(dummy_holding_cell_failed_htlc(i as u64));
9408 chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();
9410 // Encode and decode the channel and ensure that the HTLCs within are the same.
9411 let encoded_chan = chan.encode();
9412 let mut s = crate::io::Cursor::new(&encoded_chan);
9413 let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
9414 let features = channelmanager::provided_channel_type_features(&config);
9415 let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
9416 assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
9417 assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
9420 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
9422 fn outbound_commitment_test() {
9423 use bitcoin::sighash;
9424 use bitcoin::consensus::encode::serialize;
9425 use bitcoin::sighash::EcdsaSighashType;
9426 use bitcoin::hashes::hex::FromHex;
9427 use bitcoin::hash_types::Txid;
9428 use bitcoin::secp256k1::Message;
9429 use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
9430 use crate::ln::PaymentPreimage;
9431 use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
9432 use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
9433 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
9434 use crate::util::logger::Logger;
9435 use crate::sync::Arc;
9436 use core::str::FromStr;
9437 use hex::DisplayHex;
9439 // Test vectors from BOLT 3 Appendices C and F (anchors):
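// The keys, funding outpoint, and expected hex below come straight from those appendices; each
// `test_commitment!` invocation rebuilds a holder commitment transaction (and its HTLC
// transactions) and checks our signatures and serialized transactions against the vectors.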
9440 let feeest = TestFeeEstimator{fee_est: 15000};
9441 let logger : Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());
9442 let secp_ctx = Secp256k1::new();
9444 let mut signer = InMemorySigner::new(
9446 SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
9447 SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
9448 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
9449 SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
9450 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
9452 // These aren't set in the test vectors:
9453 [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
9459 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
9460 <Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
9461 let keys_provider = Keys { signer: signer.clone() };
9463 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9464 let mut config = UserConfig::default();
9465 config.channel_handshake_config.announced_channel = false;
9466 let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
9467 chan.context.holder_dust_limit_satoshis = 546;
9468 chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
9470 let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
9472 let counterparty_pubkeys = ChannelPublicKeys {
9473 funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
9474 revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
9475 payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
9476 delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
9477 htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
9478 };
9479 chan.context.channel_transaction_parameters.counterparty_parameters = Some(
9480 CounterpartyChannelTransactionParameters {
9481 pubkeys: counterparty_pubkeys.clone(),
9482 selected_contest_delay: 144
9483 });
9484 chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
9485 signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
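// At this point both the channel context and the signer know the funding outpoint and the
// counterparty's keys, which is everything needed to build and sign commitment transactions.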
9487 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
9488 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
9490 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
9491 <Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
9493 assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
9494 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
9496 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
9497 // derived from a commitment_seed, so instead we copy it here and call
9498 // build_commitment_transaction.
9499 let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
9500 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
9501 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
9502 let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
9503 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
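// The macros below each check one BOLT 3 test vector: verify the counterparty's commitment
// signature, produce and check our own signature and the fully-serialized commitment
// transaction, then repeat the signature and serialization checks for every non-dust HTLC
// transaction spending that commitment.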
9505 macro_rules! test_commitment {
9506 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
9507 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9508 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
9509 };
9510 }
9512 macro_rules! test_commitment_with_anchors {
9513 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
9514 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9515 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
9516 };
9517 }
9519 macro_rules! test_commitment_common {
9520 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
9521 $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
9522 } ) => { {
9523 let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
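// 0xffffffffffff is the initial (highest) commitment transaction number, so this builds
// holder commitment number 42, the commitment number used by the BOLT 3 vectors.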
9524 let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
9526 let htlcs = commitment_stats.htlcs_included.drain(..)
9527 .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
9528 .collect();
9529 (commitment_stats.tx, htlcs)
9530 };
9531 let trusted_tx = commitment_tx.trust();
9532 let unsigned_tx = trusted_tx.built_transaction();
9533 let redeemscript = chan.context.get_funding_redeemscript();
9534 let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
9535 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
9536 log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
9537 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
9539 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
9540 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
9541 let mut counterparty_htlc_sigs = Vec::new();
9542 counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
9543 $({
9544 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
9545 per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
9546 counterparty_htlc_sigs.push(remote_signature);
9547 })*
9548 assert_eq!(htlcs.len(), per_htlc.len());
9550 let holder_commitment_tx = HolderCommitmentTransaction::new(
9551 commitment_tx.clone(),
9552 counterparty_signature,
9553 counterparty_htlc_sigs,
9554 &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
9555 chan.context.counterparty_funding_pubkey()
9556 );
9557 let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
9558 assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
9560 let funding_redeemscript = chan.context.get_funding_redeemscript();
9561 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
9562 assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");
9564 // ((htlc, counterparty_sig), (index, holder_sig))
9565 let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();
9567 $({
9568 log_trace!(logger, "verifying htlc {}", $htlc_idx);
9569 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
9571 let htlc = &htlcs[$htlc_idx];
9572 let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
9573 chan.context.get_counterparty_selected_contest_delay().unwrap(),
9574 &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
9575 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
9576 let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
9577 let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
9578 assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");
9580 let mut preimage: Option<PaymentPreimage> = None;
9581 if !htlc.offered {
9582 for i in 0..5 {
9583 let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
9584 if out == htlc.payment_hash {
9585 preimage = Some(PaymentPreimage([i; 32]));
9586 }
9587 }
9589 assert!(preimage.is_some());
9590 }
9592 let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
9593 let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
9594 channel_derivation_parameters: ChannelDerivationParameters {
9595 value_satoshis: chan.context.channel_value_satoshis,
9596 keys_id: chan.context.channel_keys_id,
9597 transaction_parameters: chan.context.channel_transaction_parameters.clone(),
9598 },
9599 commitment_txid: trusted_tx.txid(),
9600 per_commitment_number: trusted_tx.commitment_number(),
9601 per_commitment_point: trusted_tx.per_commitment_point(),
9602 feerate_per_kw: trusted_tx.feerate_per_kw(),
9603 htlc: htlc.clone(),
9604 preimage: preimage.clone(),
9605 counterparty_sig: *htlc_counterparty_sig,
9606 }, &secp_ctx).unwrap();
9607 let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
9608 assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
9610 let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
9611 assert_eq!(signature, htlc_holder_sig, "htlc sig");
9612 let trusted_tx = holder_commitment_tx.trust();
9613 htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
9614 log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
9615 assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
9616 })*
9617 assert!(htlc_counterparty_sig_iter.next().is_none());
9618 } }
9619 }
9621 // anchors: simple commitment tx with no HTLCs and single anchor
9622 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
9623 "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
9624 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9626 // simple commitment tx with no HTLCs
9627 chan.context.value_to_self_msat = 7000000000;
9629 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
9630 "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
9631 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9633 // anchors: simple commitment tx with no HTLCs
9634 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
9635 "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
9636 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
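// Queue the five HTLCs (two offered, three received) used by the remaining BOLT 3 Appendix C
// vectors; every test_commitment!/test_commitment_with_anchors! call below runs against this
// channel state, varying the feerate, dust limit and channel type.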
9638 chan.context.pending_inbound_htlcs.push({
9639 let mut out = InboundHTLCOutput{
9640 htlc_id: 0,
9641 amount_msat: 1000000,
9642 cltv_expiry: 500,
9643 payment_hash: PaymentHash([0; 32]),
9644 state: InboundHTLCState::Committed,
9645 };
9646 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
9647 out
9648 });
9649 chan.context.pending_inbound_htlcs.push({
9650 let mut out = InboundHTLCOutput{
9651 htlc_id: 1,
9652 amount_msat: 2000000,
9653 cltv_expiry: 501,
9654 payment_hash: PaymentHash([0; 32]),
9655 state: InboundHTLCState::Committed,
9656 };
9657 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
9658 out
9659 });
9660 chan.context.pending_outbound_htlcs.push({
9661 let mut out = OutboundHTLCOutput{
9662 htlc_id: 2,
9663 amount_msat: 2000000,
9664 cltv_expiry: 502,
9665 payment_hash: PaymentHash([0; 32]),
9666 state: OutboundHTLCState::Committed,
9667 source: HTLCSource::dummy(),
9668 skimmed_fee_msat: None,
9669 blinding_point: None,
9670 };
9671 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
9672 out
9673 });
9674 chan.context.pending_outbound_htlcs.push({
9675 let mut out = OutboundHTLCOutput{
9676 htlc_id: 3,
9677 amount_msat: 3000000,
9678 cltv_expiry: 503,
9679 payment_hash: PaymentHash([0; 32]),
9680 state: OutboundHTLCState::Committed,
9681 source: HTLCSource::dummy(),
9682 skimmed_fee_msat: None,
9683 blinding_point: None,
9684 };
9685 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
9686 out
9687 });
9688 chan.context.pending_inbound_htlcs.push({
9689 let mut out = InboundHTLCOutput{
9690 htlc_id: 4,
9691 amount_msat: 4000000,
9692 cltv_expiry: 504,
9693 payment_hash: PaymentHash([0; 32]),
9694 state: InboundHTLCState::Committed,
9695 };
9696 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
9697 out
9698 });
9700 // commitment tx with all five HTLCs untrimmed (minimum feerate)
9701 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9702 chan.context.feerate_per_kw = 0;
9704 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
9705 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
9706 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9708 { 0,
9709 "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
9710 "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
9711 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
9713 { 1,
9714 "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
9715 "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
9716 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9718 { 2,
9719 "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
9720 "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
9721 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9723 { 3,
9724 "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
9725 "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
9726 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9728 { 4,
9729 "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
9730 "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
9731 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9732 } );
9734 // commitment tx with seven outputs untrimmed (maximum feerate)
9735 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9736 chan.context.feerate_per_kw = 647;
9738 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
9739 "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
9740 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9742 { 0,
9743 "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
9744 "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
9745 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
9747 { 1,
9748 "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
9749 "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
9750 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9752 { 2,
9753 "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
9754 "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
9755 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9757 { 3,
9758 "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
9759 "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
9760 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9762 { 4,
9763 "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
9764 "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
9765 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9766 } );
9768 // commitment tx with six outputs untrimmed (minimum feerate)
9769 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9770 chan.context.feerate_per_kw = 648;
9772 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
9773 "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
9774 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9776 { 0,
9777 "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
9778 "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
9779 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9781 { 1,
9782 "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
9783 "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
9784 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9786 { 2,
9787 "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
9788 "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
9789 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9791 { 3,
9792 "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
9793 "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
9794 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9795 } );
9797 // anchors: commitment tx with six outputs untrimmed (minimum dust limit)
9798 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9799 chan.context.feerate_per_kw = 645;
9800 chan.context.holder_dust_limit_satoshis = 1001;
9802 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
9803 "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
9804 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9806 { 0,
9807 "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
9808 "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
9809 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
9811 { 1,
9812 "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
9813 "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
9814 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
9816 { 2,
9817 "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
9818 "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
9819 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9821 { 3,
9822 "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
9823 "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
9824 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9825 } );
9827 // commitment tx with six outputs untrimmed (maximum feerate)
9828 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9829 chan.context.feerate_per_kw = 2069;
9830 chan.context.holder_dust_limit_satoshis = 546;
9832 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
9833 "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
9834 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9836 { 0,
9837 "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
9838 "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
9839 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9841 { 1,
9842 "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
9843 "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
9844 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9846 { 2,
9847 "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
9848 "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
9849 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9851 { 3,
9852 "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
9853 "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
9854 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9855 } );
9857 // commitment tx with five outputs untrimmed (minimum feerate)
9858 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9859 chan.context.feerate_per_kw = 2070;
9861 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
9862 "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
9863 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9865 { 0,
9866 "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
9867 "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
9868 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9870 { 1,
9871 "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
9872 "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
9873 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9875 { 2,
9876 "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
9877 "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
9878 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9879 } );
9881 // commitment tx with five outputs untrimmed (maximum feerate)
9882 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9883 chan.context.feerate_per_kw = 2194;
9885 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
9886 "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
9887 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9889 { 0,
9890 "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
9891 "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
9892 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9894 { 1,
9895 "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
9896 "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
9897 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9899 { 2,
9900 "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
9901 "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
9902 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9903 } );
9905 // commitment tx with four outputs untrimmed (minimum feerate)
9906 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9907 chan.context.feerate_per_kw = 2195;
9909 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
9910 "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
9911 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9914 "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
9915 "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
9916 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9919 "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
9920 "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
9921 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// anchors: commitment tx with four outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2185;
chan.context.holder_dust_limit_satoshis = 2001;
let cached_channel_type = chan.context.channel_type.clone();
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9931 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
9932 "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
9933 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9936 "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
9937 "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
9938 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9941 "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
9942 "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
9943 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
// commitment tx with four outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3702;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();
9952 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
9953 "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
9954 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9957 "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
9958 "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
9959 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9962 "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
9963 "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
9964 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with three outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3703;
9971 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
9972 "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
9973 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9976 "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
9977 "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
9978 "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// anchors: commitment tx with three outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3687;
chan.context.holder_dust_limit_satoshis = 3001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9987 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
9988 "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
9989 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9992 "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
9993 "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
9994 "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
// commitment tx with three outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4914;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();
10003 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
10004 "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
10005 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10008 "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
10009 "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
10010 "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with two outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4915;
chan.context.holder_dust_limit_satoshis = 546;
10018 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
10019 "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
10020 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// anchors: commitment tx with two outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4894;
chan.context.holder_dust_limit_satoshis = 4001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10028 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
10029 "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
10030 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// commitment tx with two outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 9651180;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();
10038 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
10039 "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
10040 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// commitment tx with one output untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 9651181;
10046 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
10047 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
10048 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// anchors: commitment tx with one output untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 6216010;
chan.context.holder_dust_limit_satoshis = 4001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10056 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
10057 "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
10058 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// commitment tx with fee greater than funder amount
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 9651936;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type;
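// The commitment transaction fee is always taken from the funder's output; once the feerate
// is high enough that the fee exceeds everything the funder has to spend, the funder's output
// disappears entirely and only the counterparty's output (plus any anchor outputs) remains.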
10066 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
10067 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
10068 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
chan.context.feerate_per_kw = 253;
chan.context.pending_inbound_htlcs.clear();
chan.context.pending_inbound_htlcs.push({
	// Received HTLC 1 from the BOLT 3 Appendix C vectors (2_000_000 msat, expiry 501).
	let mut out = InboundHTLCOutput{
		htlc_id: 1,
		amount_msat: 2000000,
		cltv_expiry: 501,
		payment_hash: PaymentHash([0; 32]),
		state: InboundHTLCState::Committed,
	};
	out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
	out
});
chan.context.pending_outbound_htlcs.clear();
chan.context.pending_outbound_htlcs.push({
	// Offered HTLC 6 from the BOLT 3 Appendix C vectors (5_000_001 msat, expiry 506, same preimage as HTLC 5).
	let mut out = OutboundHTLCOutput{
		htlc_id: 6,
		amount_msat: 5000001,
		cltv_expiry: 506,
		payment_hash: PaymentHash([0; 32]),
		state: OutboundHTLCState::Committed,
		source: HTLCSource::dummy(),
		skimmed_fee_msat: None,
		blinding_point: None,
	};
	out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
	out
});
chan.context.pending_outbound_htlcs.push({
	// Offered HTLC 5 from the BOLT 3 Appendix C vectors (5_000_000 msat, expiry 505, same preimage as HTLC 6).
	let mut out = OutboundHTLCOutput{
		htlc_id: 5,
		amount_msat: 5000000,
		cltv_expiry: 505,
		payment_hash: PaymentHash([0; 32]),
		state: OutboundHTLCState::Committed,
		source: HTLCSource::dummy(),
		skimmed_fee_msat: None,
		blinding_point: None,
	};
	out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
	out
});
10115 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
10116 "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
10117 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10120 "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
10121 "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
10122 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
10124 "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
10125 "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
10126 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
10128 "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
10129 "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
10130 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10134 test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
10135 "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
10136 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10139 "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
10140 "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
10141 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
10143 "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
10144 "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
10145 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
10147 "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
10148 "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
10149 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
10154 fn test_per_commitment_secret_gen() {
10155 // Test vectors from BOLT 3 Appendix D:
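// `build_commitment_secret` implements the BOLT 3 `generate_from_seed` construction: start
// from the 32-byte seed, walk the 48-bit commitment index from its most-significant bit down
// to its least-significant bit, and for every bit that is set, flip the corresponding bit of
// the running value and replace the value with its SHA256 hash. The vectors below cover the
// all-zeroes seed, the all-ones seed, alternating-bit indexes, and index 1.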
10157 let mut seed = [0; 32];
10158 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
10159 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
10160 <Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);
10162 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
10163 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
10164 <Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);
10166 assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
10167 <Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);
10169 assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
10170 <Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);
10172 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
10173 assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
10174 <Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
10178 fn test_key_derivation() {
10179 // Test vectors from BOLT 3 Appendix E:
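// BOLT 3 derives each per-commitment key by tweaking a static basepoint with the current
// per-commitment point:
//   pubkey  = basepoint + SHA256(per_commitment_point || basepoint) * G
//   privkey = basepoint_secret + SHA256(per_commitment_point || basepoint)
// The revocation key mixes contributions from both sides, so neither party can compute its
// private half until the per-commitment secret is revealed:
//   revocationpubkey = revocation_basepoint * SHA256(revocation_basepoint || per_commitment_point)
//                      + per_commitment_point * SHA256(per_commitment_point || revocation_basepoint)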
10180 let secp_ctx = Secp256k1::new();
10182 let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
10183 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
10185 let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
10186 assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);
10188 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
10189 assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
10191 assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
10192 SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());
10194 assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
10195 <Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);
10197 assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
10198 SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
10202 fn test_zero_conf_channel_type_support() {
10203 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
10204 let secp_ctx = Secp256k1::new();
10205 let seed = [42; 32];
10206 let network = Network::Testnet;
10207 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
10208 let logger = test_utils::TestLogger::new();
10210 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
10211 let config = UserConfig::default();
10212 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
10213 node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
10215 let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
10216 channel_type_features.set_zero_conf_required();
10218 let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
10219 open_channel_msg.common_fields.channel_type = Some(channel_type_features);
10220 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
10221 let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
10222 node_b_node_id, &channelmanager::provided_channel_type_features(&config),
10223 &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
10224 assert!(res.is_ok());
10228 fn test_supports_anchors_zero_htlc_tx_fee() {
10229 // Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
// resulting `channel_type`.
10231 let secp_ctx = Secp256k1::new();
10232 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
10233 let network = Network::Testnet;
10234 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
10235 let logger = test_utils::TestLogger::new();
10237 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
10238 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
10240 let mut config = UserConfig::default();
10241 config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
10243 // It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both
// need to signal it.
10245 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
10246 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
10247 &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
&config, 0, 42, None
).unwrap();
10250 assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());
10252 let mut expected_channel_type = ChannelTypeFeatures::empty();
10253 expected_channel_type.set_static_remote_key_required();
10254 expected_channel_type.set_anchors_zero_fee_htlc_tx_required();
10256 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
10257 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
None
).unwrap();
10262 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
10263 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
10264 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
10265 &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
).unwrap();
10269 assert_eq!(channel_a.context.channel_type, expected_channel_type);
10270 assert_eq!(channel_b.context.channel_type, expected_channel_type);
10274 fn test_rejects_implicit_simple_anchors() {
10275 // Tests that if `option_anchors` is being negotiated implicitly through the intersection of
// each side's `InitFeatures`, it is rejected.
10277 let secp_ctx = Secp256k1::new();
10278 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
10279 let network = Network::Testnet;
10280 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
10281 let logger = test_utils::TestLogger::new();
10283 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
10284 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
10286 let config = UserConfig::default();
10288 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
10289 let static_remote_key_required: u64 = 1 << 12;
10290 let simple_anchors_required: u64 = 1 << 20;
10291 let raw_init_features = static_remote_key_required | simple_anchors_required;
10292 let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
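// Even feature bits are "required" per BOLT 9: bit 12 is `option_static_remotekey` and bit 20
// is the original `option_anchors` (anchor outputs without zero-fee HTLC transactions), i.e.
// the "simple anchors" variant that LDK does not support as a channel type.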
10294 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
10295 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
None
).unwrap();
10300 // Set `channel_type` to `None` to force the implicit feature negotiation.
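// When no explicit `channel_type` is present, the acceptor derives the channel type from the
// feature bits both peers advertised in `init`, which is how the legacy anchors path could be
// reached implicitly here.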
10301 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
10302 open_channel_msg.common_fields.channel_type = None;
10304 // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
// `static_remote_key`, it will fail the channel.
10306 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
10307 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
10308 &channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
);
10311 assert!(channel_b.is_err());
10315 fn test_rejects_simple_anchors_channel_type() {
// Tests that if `option_anchors` is being negotiated through the `channel_type` feature, it
// is rejected.
10318 let secp_ctx = Secp256k1::new();
10319 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
10320 let network = Network::Testnet;
10321 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
10322 let logger = test_utils::TestLogger::new();
10324 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
10325 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
10327 let config = UserConfig::default();
10329 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
10330 let static_remote_key_required: u64 = 1 << 12;
10331 let simple_anchors_required: u64 = 1 << 20;
10332 let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
10333 let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
10334 let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
10335 assert!(!simple_anchors_init.requires_unknown_bits());
10336 assert!(!simple_anchors_channel_type.requires_unknown_bits());
10338 // First, we'll try to open a channel between A and B where A requests a channel type for
// the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
// B as it's not supported by LDK.
10341 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
10342 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
None
).unwrap();
10347 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
10348 open_channel_msg.common_fields.channel_type = Some(simple_anchors_channel_type.clone());
10350 let res = InboundV1Channel::<&TestKeysInterface>::new(
10351 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
10352 &channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
);
10355 assert!(res.is_err());
10357 // Then, we'll try to open another channel where A requests a channel type for
// `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
// original `option_anchors` feature, which should be rejected by A as it's not supported by
// LDK.
10361 let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
10362 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
10000000, 100000, 42, &config, 0, 42, None
).unwrap();
10366 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
10368 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
10369 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
10370 &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
).unwrap();
10374 let mut accept_channel_msg = channel_b.get_accept_channel_message();
10375 accept_channel_msg.common_fields.channel_type = Some(simple_anchors_channel_type.clone());
10377 let res = channel_a.accept_channel(
&accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
);
10380 assert!(res.is_err());
10384 fn test_waiting_for_batch() {
10385 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
10386 let logger = test_utils::TestLogger::new();
10387 let secp_ctx = Secp256k1::new();
10388 let seed = [42; 32];
10389 let network = Network::Testnet;
10390 let best_block = BestBlock::from_network(network);
10391 let chain_hash = ChainHash::using_genesis_block(network);
10392 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
10394 let mut config = UserConfig::default();
10395 // Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
// channel in a batch before all channels are ready.
10397 config.channel_handshake_limits.trust_own_funding_0conf = true;
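// In batch funding, a single transaction funds several channels at once, so it is only safe
// to broadcast it (or to treat any one channel as ready) after every channel in the batch has
// completed signing; this test checks that channel_ready is held back accordingly.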
10399 // Create a channel from node a to node b that will be part of batch funding.
10400 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
	&feeest, &&keys_provider, &&keys_provider, node_b_node_id,
	&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None
).unwrap();
10416 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
10417 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
	&feeest, &&keys_provider, &&keys_provider, node_b_node_id,
	&channelmanager::provided_channel_type_features(&config),
	&channelmanager::provided_init_features(&config),
	&open_channel_msg, 7, &config, 0, &&logger,
	true, // Allow node b to send a 0conf channel_ready.
).unwrap();
10433 let accept_channel_msg = node_b_chan.accept_inbound_channel();
10434 node_a_chan.accept_channel(
10435 &accept_channel_msg,
10436 &config.channel_handshake_limits,
&channelmanager::provided_init_features(&config),
).unwrap();
10440 // Fund the channel with a batch funding transaction.
10441 let output_script = node_a_chan.context.get_funding_redeemscript();
let tx = Transaction {
	version: 2, lock_time: LockTime::ZERO, input: Vec::new(),
	output: vec![
		TxOut { value: 10000000, script_pubkey: output_script.clone() },
		TxOut { value: 10000000, script_pubkey: Builder::new().into_script() },
	],
};
let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
10455 let funding_created_msg = node_a_chan.get_funding_created(
10456 tx.clone(), funding_outpoint, true, &&logger,
10457 ).map_err(|_| ()).unwrap();
let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
	&funding_created_msg.unwrap(),
	best_block,
	&&keys_provider,
	&&logger,
).map_err(|_| ()).unwrap();
let node_b_updates = node_b_chan.monitor_updating_restored(&&logger, &&keys_provider, chain_hash, &config, 0);
10472 // Receive funding_signed, but the channel will be configured to hold sending channel_ready and
// broadcasting the funding transaction until the batch is ready.
10474 let res = node_a_chan.funding_signed(
&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger,
);
10477 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
let node_a_updates = node_a_chan.monitor_updating_restored(&&logger, &&keys_provider, chain_hash, &config, 0);
10485 // Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
// as the funding transaction depends on all channels in the batch becoming ready.
10487 assert!(node_a_updates.channel_ready.is_none());
10488 assert!(node_a_updates.funding_broadcastable.is_none());
10489 assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
10491 // It is possible to receive a 0conf channel_ready from the remote node.
node_a_chan.channel_ready(
	&node_b_updates.channel_ready.unwrap(), &&keys_provider, chain_hash, &config, &best_block, &&logger,
).unwrap();
assert_eq!(
	node_a_chan.context.channel_state,
	ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH | AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)
);
10505 // Clear the ChannelState::WaitingForBatch only when called by ChannelManager.
10506 node_a_chan.set_batch_ready();
10507 assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
10508 assert!(node_a_chan.check_get_channel_ready(0).is_some());