// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::sighash::EcdsaSighashType;
use bitcoin::consensus::encode;

use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256d;
use bitcoin::hash_types::{Txid, BlockHash};

use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
use bitcoin::secp256k1::{PublicKey,SecretKey};
use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
use bitcoin::secp256k1;

use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
use crate::ln::msgs;
use crate::ln::msgs::DecodeError;
use crate::ln::script::{self, ShutdownScript};
use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
use crate::ln::chan_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
use crate::events::ClosureReason;
use crate::routing::gossip::NodeId;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use crate::util::logger::{Logger, Record, WithContext};
use crate::util::errors::APIError;
use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
use crate::util::scid_utils::scid_from_parts;

use crate::prelude::*;
use core::{cmp,mem,fmt};
use core::convert::TryInto;
use core::ops::Deref;
#[cfg(any(test, fuzzing, debug_assertions))]
use crate::sync::Mutex;
use crate::sign::type_resolver::ChannelSignerType;

use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};
#[cfg(test)]
pub struct ChannelValueStat {
	pub value_to_self_msat: u64,
	pub channel_value_msat: u64,
	pub channel_reserve_msat: u64,
	pub pending_outbound_htlcs_amount_msat: u64,
	pub pending_inbound_htlcs_amount_msat: u64,
	pub holding_cell_outbound_amount_msat: u64,
	pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
	pub counterparty_dust_limit_msat: u64,
}
#[derive(Debug, Clone, Copy)]
pub struct AvailableBalances {
	/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
	pub balance_msat: u64,
	/// Total amount available for our counterparty to send to us.
	pub inbound_capacity_msat: u64,
	/// Total amount available for us to send to our counterparty.
	pub outbound_capacity_msat: u64,
	/// The maximum value we can assign to the next outbound HTLC.
	pub next_outbound_htlc_limit_msat: u64,
	/// The minimum value we can assign to the next outbound HTLC.
	pub next_outbound_htlc_minimum_msat: u64,
}
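
// A minimal sketch (not part of LDK's test suite) of the invariants a consumer can rely on
// between these fields; the concrete numbers below are arbitrary illustration.
#[cfg(test)]
mod available_balances_example {
	use super::AvailableBalances;

	#[test]
	fn next_htlc_limits_stay_within_capacity() {
		let balances = AvailableBalances {
			balance_msat: 1_000_000,
			inbound_capacity_msat: 500_000,
			outbound_capacity_msat: 800_000,
			next_outbound_htlc_limit_msat: 700_000,
			next_outbound_htlc_minimum_msat: 1,
		};
		// The per-HTLC limit can never exceed the total outbound capacity, and the
		// minimum can never exceed the limit.
		assert!(balances.next_outbound_htlc_limit_msat <= balances.outbound_capacity_msat);
		assert!(balances.next_outbound_htlc_minimum_msat <= balances.next_outbound_htlc_limit_msat);
	}
}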
#[derive(Debug, Clone, Copy, PartialEq)]
enum FeeUpdateState {
	// Inbound states mirroring InboundHTLCState
	RemoteAnnounced,
	AwaitingRemoteRevokeToAnnounce,
	// Note that we do not have an AwaitingAnnouncedRemoteRevoke variant here as it is universally
	// handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
	// distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
	// the fee update anywhere, we can simply consider the fee update `Committed` immediately
	// instead of setting it to AwaitingAnnouncedRemoteRevoke.

	// Outbound state can only be `LocalAnnounced` or `Committed`
	Outbound,
}

enum InboundHTLCRemovalReason {
	FailRelay(msgs::OnionErrorPacket),
	FailMalformed(([u8; 32], u16)),
	Fulfill(PaymentPreimage),
}
enum InboundHTLCState {
	/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
	/// update_add_htlc message for this HTLC.
	RemoteAnnounced(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've
	/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
	/// state (see the example below). We have not yet included this HTLC in a
	/// commitment_signed message because we are waiting on the remote's
	/// aforementioned state revocation. One reason this missing remote RAA
	/// (revoke_and_ack) blocks us from constructing a commitment_signed message
	/// is because every time we create a new "state", i.e. every time we sign a
	/// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
	/// which is provided one-at-a-time in each RAA. E.g., the last RAA they
	/// sent provided the per_commitment_point for our current commitment tx.
	/// The other reason we should not send a commitment_signed without their RAA
	/// is because their RAA serves to ACK our previous commitment_signed.
	///
	/// Here's an example of how an HTLC could come to be in this state:
	/// remote --> update_add_htlc(prev_htlc) --> local
	/// remote --> commitment_signed(prev_htlc) --> local
	/// remote <-- revoke_and_ack <-- local
	/// remote <-- commitment_signed(prev_htlc) <-- local
	/// [note that here, the remote does not respond with a RAA]
	/// remote --> update_add_htlc(this_htlc) --> local
	/// remote --> commitment_signed(prev_htlc, this_htlc) --> local
	/// Now `this_htlc` will be assigned this state. It's unable to be officially
	/// accepted, i.e. included in a commitment_signed, because we're missing the
	/// RAA that provides our next per_commitment_point. The per_commitment_point
	/// is used to derive commitment keys, which are used to construct the
	/// signatures in a commitment_signed message.
	/// Implies AwaitingRemoteRevoke.
	///
	/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
	AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
	/// We have also included this HTLC in our latest commitment_signed and are now just waiting
	/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
	/// channel (before it can then get forwarded and/or removed).
	/// Implies AwaitingRemoteRevoke.
	AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
	Committed,
	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we'll drop it.
	///
	/// Note that we have to keep an eye on the HTLC until we've received a broadcastable
	/// commitment transaction without it as otherwise we'll have to force-close the channel to
	/// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
	/// anyway). That said, ChannelMonitor does this for us (see
	/// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
	/// our own local state before then, once we're sure that the next commitment_signed and
	/// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
	LocalRemoved(InboundHTLCRemovalReason),
}
/// Exposes the state of pending inbound HTLCs.
///
/// At a high level, an HTLC being forwarded from one Lightning node to another Lightning node goes
/// through the following states in the state machine:
/// - Announced for addition by the originating node through the update_add_htlc message.
/// - Added to the commitment transaction of the receiving node and originating node in turn
///   through the exchange of commitment_signed and revoke_and_ack messages.
/// - Announced for resolution (fulfillment or failure) by the receiving node through either one of
///   the update_fulfill_htlc, update_fail_htlc, and update_fail_malformed_htlc messages.
/// - Removed from the commitment transaction of the originating node and receiving node in turn
///   through the exchange of commitment_signed and revoke_and_ack messages.
///
/// This can be used to inspect what next message an HTLC is waiting for to advance its state.
#[derive(Clone, Debug, PartialEq)]
pub enum InboundHTLCStateDetails {
	/// We have added this HTLC in our commitment transaction by receiving commitment_signed and
	/// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
	/// before this HTLC is included on the remote commitment transaction.
	AwaitingRemoteRevokeToAdd,
	/// This HTLC has been included in the commitment_signed and revoke_and_ack messages on both sides
	/// and is included in both commitment transactions.
	///
	/// This HTLC is now safe to either forward or be claimed as a payment by us. The HTLC will
	/// remain in this state until the forwarded upstream HTLC has been resolved and we resolve this
	/// HTLC correspondingly, or until we claim it as a payment. If it is part of a multipart
	/// payment, it will only be claimed together with other required parts.
	Committed,
	/// We have received the preimage for this HTLC and it is being removed by fulfilling it with
	/// update_fulfill_htlc. This HTLC is still on both commitment transactions, but we are awaiting
	/// the appropriate revoke_and_ack's from the remote before this HTLC is removed from the remote
	/// commitment transaction after update_fulfill_htlc.
	AwaitingRemoteRevokeToRemoveFulfill,
	/// The HTLC is being removed by failing it with update_fail_htlc or update_fail_malformed_htlc.
	/// This HTLC is still on both commitment transactions, but we are awaiting the appropriate
	/// revoke_and_ack's from the remote before this HTLC is removed from the remote commitment
	/// transaction after update_fail_htlc.
	AwaitingRemoteRevokeToRemoveFail,
}
impl From<&InboundHTLCState> for Option<InboundHTLCStateDetails> {
	fn from(state: &InboundHTLCState) -> Option<InboundHTLCStateDetails> {
		match state {
			InboundHTLCState::RemoteAnnounced(_) => None,
			InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
			InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
			InboundHTLCState::Committed =>
				Some(InboundHTLCStateDetails::Committed),
			InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(_)) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail),
			InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(_)) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail),
			InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill),
		}
	}
}
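
// A small illustrative test (a sketch, not an LDK test): the unit-like states map to the
// public details as documented above, while `RemoteAnnounced` is hidden entirely.
#[cfg(test)]
mod inbound_htlc_state_details_example {
	use super::*;

	#[test]
	fn state_to_details_mapping() {
		// A fully committed HTLC is exposed as `Committed`.
		let committed: Option<InboundHTLCStateDetails> = (&InboundHTLCState::Committed).into();
		assert_eq!(committed, Some(InboundHTLCStateDetails::Committed));

		// A locally-fulfilled HTLC is awaiting the remote revocation for its removal.
		let fulfilled = InboundHTLCState::LocalRemoved(
			InboundHTLCRemovalReason::Fulfill(PaymentPreimage([0; 32])));
		assert_eq!(Option::<InboundHTLCStateDetails>::from(&fulfilled),
			Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill));
	}
}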
impl_writeable_tlv_based_enum_upgradable!(InboundHTLCStateDetails,
	(0, AwaitingRemoteRevokeToAdd) => {},
	(2, Committed) => {},
	(4, AwaitingRemoteRevokeToRemoveFulfill) => {},
	(6, AwaitingRemoteRevokeToRemoveFail) => {};
);

struct InboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: InboundHTLCState,
}
/// Exposes details around pending inbound HTLCs.
#[derive(Clone, Debug, PartialEq)]
pub struct InboundHTLCDetails {
	/// The HTLC ID.
	/// The IDs are incremented by 1 starting from 0 for each offered HTLC.
	/// They are unique per channel and inbound/outbound direction, unless an HTLC was only announced
	/// and not part of any commitment transaction.
	pub htlc_id: u64,
	/// The amount in msat.
	pub amount_msat: u64,
	/// The block height at which this HTLC expires.
	pub cltv_expiry: u32,
	/// The payment hash.
	pub payment_hash: PaymentHash,
	/// The state of the HTLC in the state machine.
	///
	/// Determines on which commitment transactions the HTLC is included and what message the HTLC is
	/// waiting for to advance to the next state.
	///
	/// See [`InboundHTLCStateDetails`] for information on the specific states.
	///
	/// LDK will always fill this field in, but when downgrading to prior versions of LDK, new
	/// states may result in `None` here.
	pub state: Option<InboundHTLCStateDetails>,
	/// Whether the HTLC has an output below the local dust limit. If so, the output will be trimmed
	/// from the local commitment transaction and added to the commitment transaction fee.
	/// For non-anchor channels, this takes into account the cost of the second-stage HTLC
	/// transactions as well.
	///
	/// When the local commitment transaction is broadcasted as part of a unilateral closure,
	/// the value of this HTLC will therefore not be claimable but instead burned as a transaction
	/// fee.
	///
	/// Note that dust limits are specific to each party. An HTLC can be dust for the local
	/// commitment transaction but not for the counterparty's commitment transaction and vice versa.
	pub is_dust: bool,
}

impl_writeable_tlv_based!(InboundHTLCDetails, {
	(0, htlc_id, required),
	(2, amount_msat, required),
	(4, cltv_expiry, required),
	(6, payment_hash, required),
	(7, state, upgradable_option),
	(8, is_dust, required),
});
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum OutboundHTLCState {
	/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we will promote to Committed (note that they may not accept it until the next time we
	/// revoke, but we don't really care about that:
	///  * they've revoked, so worst case we can announce an old state and get our (option on)
	///    money back (though we won't), and,
	///  * we'll send them a revoke when they send a commitment_signed, and since only they're
	///    allowed to remove it, the "can only be removed once committed on both sides" requirement
	///    doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
	///    we'll never get out of sync).
	/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
	/// OutboundHTLCOutput's size just for a temporary bit
	LocalAnnounced(Box<msgs::OnionPacket>),
	Committed,
	/// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
	/// the change (though they'll need to revoke before we fail the payment).
	RemoteRemoved(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
	/// remote revoke_and_ack on a previous state before we can do so.
	AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
	/// revoke_and_ack to drop completely.
	AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
}
/// Exposes the state of pending outbound HTLCs.
///
/// At a high level, an HTLC being forwarded from one Lightning node to another Lightning node goes
/// through the following states in the state machine:
/// - Announced for addition by the originating node through the update_add_htlc message.
/// - Added to the commitment transaction of the receiving node and originating node in turn
///   through the exchange of commitment_signed and revoke_and_ack messages.
/// - Announced for resolution (fulfillment or failure) by the receiving node through either one of
///   the update_fulfill_htlc, update_fail_htlc, and update_fail_malformed_htlc messages.
/// - Removed from the commitment transaction of the originating node and receiving node in turn
///   through the exchange of commitment_signed and revoke_and_ack messages.
///
/// This can be used to inspect what next message an HTLC is waiting for to advance its state.
#[derive(Clone, Debug, PartialEq)]
pub enum OutboundHTLCStateDetails {
	/// We are awaiting the appropriate revoke_and_ack's from the remote before the HTLC is added
	/// on the remote's commitment transaction after update_add_htlc.
	AwaitingRemoteRevokeToAdd,
	/// The HTLC has been added to the remote's commitment transaction by sending commitment_signed
	/// and receiving revoke_and_ack in return.
	///
	/// The HTLC will remain in this state until the remote node resolves the HTLC, or until we
	/// unilaterally close the channel due to a timeout with an uncooperative remote node.
	Committed,
	/// The HTLC has been fulfilled successfully by the remote with a preimage in update_fulfill_htlc,
	/// and we removed the HTLC from our commitment transaction by receiving commitment_signed and
	/// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
	/// for the removal from its commitment transaction.
	AwaitingRemoteRevokeToRemoveSuccess,
	/// The HTLC has been failed by the remote with update_fail_htlc or update_fail_malformed_htlc,
	/// and we removed the HTLC from our commitment transaction by receiving commitment_signed and
	/// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
	/// for the removal from its commitment transaction.
	AwaitingRemoteRevokeToRemoveFailure,
}
impl From<&OutboundHTLCState> for OutboundHTLCStateDetails {
	fn from(state: &OutboundHTLCState) -> OutboundHTLCStateDetails {
		match state {
			OutboundHTLCState::LocalAnnounced(_) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToAdd,
			OutboundHTLCState::Committed =>
				OutboundHTLCStateDetails::Committed,
			// RemoteRemoved states are ignored as the state is transient and the remote has not committed to
			// the state yet.
			OutboundHTLCState::RemoteRemoved(_) =>
				OutboundHTLCStateDetails::Committed,
			OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess,
			OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Failure(_)) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFailure,
			OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess,
			OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Failure(_)) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFailure,
		}
	}
}
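
// An illustrative check (a sketch, not an LDK test) of the conversion above: a transient
// `RemoteRemoved` is still reported as `Committed` since the remote has not committed to
// the removal yet.
#[cfg(test)]
mod outbound_htlc_state_details_example {
	use super::*;

	#[test]
	fn remote_removed_still_reported_committed() {
		assert_eq!(OutboundHTLCStateDetails::from(&OutboundHTLCState::Committed),
			OutboundHTLCStateDetails::Committed);
		let removed = OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(None));
		assert_eq!(OutboundHTLCStateDetails::from(&removed),
			OutboundHTLCStateDetails::Committed);
	}
}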
impl_writeable_tlv_based_enum_upgradable!(OutboundHTLCStateDetails,
	(0, AwaitingRemoteRevokeToAdd) => {},
	(2, Committed) => {},
	(4, AwaitingRemoteRevokeToRemoveSuccess) => {},
	(6, AwaitingRemoteRevokeToRemoveFailure) => {};
);

#[derive(Clone)]
#[cfg_attr(test, derive(Debug, PartialEq))]
enum OutboundHTLCOutcome {
	/// LDK version 0.0.105+ will always fill in the preimage here.
	Success(Option<PaymentPreimage>),
	Failure(HTLCFailReason),
}
impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
	fn from(o: Option<HTLCFailReason>) -> Self {
		match o {
			None => OutboundHTLCOutcome::Success(None),
			Some(r) => OutboundHTLCOutcome::Failure(r)
		}
	}
}

impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
	fn into(self) -> Option<&'a HTLCFailReason> {
		match self {
			OutboundHTLCOutcome::Success(_) => None,
			OutboundHTLCOutcome::Failure(ref r) => Some(r)
		}
	}
}
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
struct OutboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: OutboundHTLCState,
	source: HTLCSource,
	blinding_point: Option<PublicKey>,
	skimmed_fee_msat: Option<u64>,
}
/// Exposes details around pending outbound HTLCs.
#[derive(Clone, Debug, PartialEq)]
pub struct OutboundHTLCDetails {
	/// The HTLC ID.
	/// The IDs are incremented by 1 starting from 0 for each offered HTLC.
	/// They are unique per channel and inbound/outbound direction, unless an HTLC was only announced
	/// and not part of any commitment transaction.
	///
	/// Not present when we are awaiting a remote revocation and the HTLC is not added yet.
	pub htlc_id: Option<u64>,
	/// The amount in msat.
	pub amount_msat: u64,
	/// The block height at which this HTLC expires.
	pub cltv_expiry: u32,
	/// The payment hash.
	pub payment_hash: PaymentHash,
	/// The state of the HTLC in the state machine.
	///
	/// Determines on which commitment transactions the HTLC is included and what message the HTLC is
	/// waiting for to advance to the next state.
	///
	/// See [`OutboundHTLCStateDetails`] for information on the specific states.
	///
	/// LDK will always fill this field in, but when downgrading to prior versions of LDK, new
	/// states may result in `None` here.
	pub state: Option<OutboundHTLCStateDetails>,
	/// The extra fee being skimmed off the top of this HTLC.
	pub skimmed_fee_msat: Option<u64>,
	/// Whether the HTLC has an output below the local dust limit. If so, the output will be trimmed
	/// from the local commitment transaction and added to the commitment transaction fee.
	/// For non-anchor channels, this takes into account the cost of the second-stage HTLC
	/// transactions as well.
	///
	/// When the local commitment transaction is broadcasted as part of a unilateral closure,
	/// the value of this HTLC will therefore not be claimable but instead burned as a transaction
	/// fee.
	///
	/// Note that dust limits are specific to each party. An HTLC can be dust for the local
	/// commitment transaction but not for the counterparty's commitment transaction and vice versa.
	pub is_dust: bool,
}

impl_writeable_tlv_based!(OutboundHTLCDetails, {
	(0, htlc_id, required),
	(2, amount_msat, required),
	(4, cltv_expiry, required),
	(6, payment_hash, required),
	(7, state, upgradable_option),
	(8, skimmed_fee_msat, required),
	(10, is_dust, required),
});
/// See AwaitingRemoteRevoke ChannelState for more info
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum HTLCUpdateAwaitingACK {
	AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
		// always outbound
		amount_msat: u64,
		cltv_expiry: u32,
		payment_hash: PaymentHash,
		source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket,
		// The extra fee we're skimming off the top of this HTLC.
		skimmed_fee_msat: Option<u64>,
		blinding_point: Option<PublicKey>,
	},
	ClaimHTLC {
		payment_preimage: PaymentPreimage,
		htlc_id: u64,
	},
	FailHTLC {
		htlc_id: u64,
		err_packet: msgs::OnionErrorPacket,
	},
	FailMalformedHTLC {
		htlc_id: u64,
		failure_code: u16,
		sha256_of_onion: [u8; 32],
	},
}
macro_rules! define_state_flags {
	($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr, $get: ident, $set: ident, $clear: ident)),+], $extra_flags: expr) => {
		#[doc = $flag_type_doc]
		#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
		struct $flag_type(u32);

		impl $flag_type {
			$(
				#[doc = $flag_doc]
				const $flag: $flag_type = $flag_type($value);
			)*

			/// All flags that apply to the specified [`ChannelState`] variant.
			#[allow(unused)]
			const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags);

			#[allow(unused)]
			fn new() -> Self { Self(0) }

			#[allow(unused)]
			fn from_u32(flags: u32) -> Result<Self, ()> {
				if flags & !Self::ALL.0 != 0 {
					Err(())
				} else {
					Ok($flag_type(flags))
				}
			}

			#[allow(unused)]
			fn is_empty(&self) -> bool { self.0 == 0 }
			#[allow(unused)]
			fn is_set(&self, flag: Self) -> bool { *self & flag == flag }
			#[allow(unused)]
			fn set(&mut self, flag: Self) { *self |= flag }
			#[allow(unused)]
			fn clear(&mut self, flag: Self) -> Self { self.0 &= !flag.0; *self }
		}

		$(
			define_state_flags!($flag_type, Self::$flag, $get, $set, $clear);
		)*

		impl core::ops::BitOr for $flag_type {
			type Output = Self;
			fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
		}
		impl core::ops::BitOrAssign for $flag_type {
			fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; }
		}
		impl core::ops::BitAnd for $flag_type {
			type Output = Self;
			fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) }
		}
		impl core::ops::BitAndAssign for $flag_type {
			fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; }
		}
	};
	($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
	};
	($flag_type: ident, $flag: expr, $get: ident, $set: ident, $clear: ident) => {
		impl $flag_type {
			#[allow(unused)]
			fn $get(&self) -> bool { self.is_set($flag_type::new() | $flag) }
			#[allow(unused)]
			fn $set(&mut self) { self.set($flag_type::new() | $flag) }
			#[allow(unused)]
			fn $clear(&mut self) -> Self { self.clear($flag_type::new() | $flag) }
		}
	};
	($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);

		define_state_flags!($flag_type, FundedStateFlags::PEER_DISCONNECTED,
			is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected);
		define_state_flags!($flag_type, FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS,
			is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress);
		define_state_flags!($flag_type, FundedStateFlags::REMOTE_SHUTDOWN_SENT,
			is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent);
		define_state_flags!($flag_type, FundedStateFlags::LOCAL_SHUTDOWN_SENT,
			is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent);

		impl core::ops::BitOr<FundedStateFlags> for $flag_type {
			type Output = Self;
			fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
		}
		impl core::ops::BitOrAssign<FundedStateFlags> for $flag_type {
			fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; }
		}
		impl core::ops::BitAnd<FundedStateFlags> for $flag_type {
			type Output = Self;
			fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) }
		}
		impl core::ops::BitAndAssign<FundedStateFlags> for $flag_type {
			fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; }
		}
		impl PartialEq<FundedStateFlags> for $flag_type {
			fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 }
		}
		impl From<FundedStateFlags> for $flag_type {
			fn from(flags: FundedStateFlags) -> Self { Self(flags.0) }
		}
	};
}
/// We declare all the states/flags here together to help determine which bits are still available
/// to choose.
mod state_flags {
	pub const OUR_INIT_SENT: u32 = 1 << 0;
	pub const THEIR_INIT_SENT: u32 = 1 << 1;
	pub const FUNDING_NEGOTIATED: u32 = 1 << 2;
	pub const AWAITING_CHANNEL_READY: u32 = 1 << 3;
	pub const THEIR_CHANNEL_READY: u32 = 1 << 4;
	pub const OUR_CHANNEL_READY: u32 = 1 << 5;
	pub const CHANNEL_READY: u32 = 1 << 6;
	pub const PEER_DISCONNECTED: u32 = 1 << 7;
	pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8;
	pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9;
	pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10;
	pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11;
	pub const SHUTDOWN_COMPLETE: u32 = 1 << 12;
	pub const WAITING_FOR_BATCH: u32 = 1 << 13;
}
625 "Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
627 ("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
628 until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED,
629 is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected),
630 ("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
631 somewhere and we should pause sending any outbound messages until they've managed to \
632 complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS,
633 is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress),
634 ("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
635 any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
636 message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT,
637 is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent),
638 ("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
639 the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT,
640 is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent)
645 "Flags that only apply to [`ChannelState::NegotiatingFunding`].",
646 NegotiatingFundingFlags, [
647 ("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
648 OUR_INIT_SENT, state_flags::OUR_INIT_SENT, is_our_init_sent, set_our_init_sent, clear_our_init_sent),
649 ("Indicates we have received their `open_channel`/`accept_channel` message.",
650 THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT, is_their_init_sent, set_their_init_sent, clear_their_init_sent)
655 "Flags that only apply to [`ChannelState::AwaitingChannelReady`].",
656 FUNDED_STATE, AwaitingChannelReadyFlags, [
657 ("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
658 `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
659 THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY,
660 is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready),
661 ("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
662 `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
663 OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY,
664 is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready),
665 ("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
666 is being held until all channels in the batch have received `funding_signed` and have \
667 their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH,
668 is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch)
673 "Flags that only apply to [`ChannelState::ChannelReady`].",
674 FUNDED_STATE, ChannelReadyFlags, [
675 ("Indicates that we have sent a `commitment_signed` but are awaiting the responding \
676 `revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
677 messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
678 implicit ACK, so instead we have to hold them away temporarily to be sent later.",
679 AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE,
680 is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke)
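
// A brief sketch (not an LDK test) of the API the `define_state_flags!` invocations above
// generate: per-state flags compose via bit-ops, each flag gets `is_*`/`set_*`/`clear_*`
// accessors, and the shared `FundedStateFlags` mix into any FUNDED_STATE flag type.
#[cfg(test)]
mod state_flags_usage_example {
	use super::*;

	#[test]
	fn composing_and_clearing_flags() {
		let mut flags = AwaitingChannelReadyFlags::THEIR_CHANNEL_READY
			| AwaitingChannelReadyFlags::OUR_CHANNEL_READY;
		assert!(flags.is_their_channel_ready() && flags.is_our_channel_ready());

		// The shared funded-state flags apply to this state as well.
		flags |= FundedStateFlags::PEER_DISCONNECTED;
		assert!(flags.is_peer_disconnected());
		flags.clear_peer_disconnected();
		assert!(!flags.is_peer_disconnected());
	}
}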
// Note that the order of this enum is implicitly defined by where each variant is placed. Take this
// into account when introducing new states and update `test_channel_state_order` accordingly.
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
pub(super) enum ChannelState {
	/// We are negotiating the parameters required for the channel prior to funding it.
	NegotiatingFunding(NegotiatingFundingFlags),
	/// We have sent `funding_created` and are awaiting a `funding_signed` to advance to
	/// `AwaitingChannelReady`. Note that this is nonsense for an inbound channel as we immediately generate
	/// `funding_signed` upon receipt of `funding_created`, so simply skip this state.
	FundingNegotiated,
	/// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the
	/// funding transaction to confirm.
	AwaitingChannelReady(AwaitingChannelReadyFlags),
	/// Both we and our counterparty consider the funding transaction confirmed and the channel is
	/// now operational.
	ChannelReady(ChannelReadyFlags),
	/// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager`
	/// is about to drop us, but we store this anyway.
	ShutdownComplete,
}
macro_rules! impl_state_flag {
	($get: ident, $set: ident, $clear: ident, [$($state: ident),+]) => {
		impl ChannelState {
			fn $get(&self) -> bool {
				match self {
					$(
						ChannelState::$state(flags) => flags.$get(),
					)*
					_ => false,
				}
			}
			fn $set(&mut self) {
				match self {
					$(
						ChannelState::$state(flags) => flags.$set(),
					)*
					_ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
				}
			}
			fn $clear(&mut self) {
				match self {
					$(
						ChannelState::$state(flags) => { let _ = flags.$clear(); },
					)*
					_ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
				}
			}
		}
	};
	($get: ident, $set: ident, $clear: ident, FUNDED_STATES) => {
		impl_state_flag!($get, $set, $clear, [AwaitingChannelReady, ChannelReady]);
	};
	($get: ident, $set: ident, $clear: ident, $state: ident) => {
		impl_state_flag!($get, $set, $clear, [$state]);
	};
}
impl ChannelState {
	fn from_u32(state: u32) -> Result<Self, ()> {
		match state {
			state_flags::FUNDING_NEGOTIATED => Ok(ChannelState::FundingNegotiated),
			state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete),
			val => {
				if val & state_flags::AWAITING_CHANNEL_READY == state_flags::AWAITING_CHANNEL_READY {
					AwaitingChannelReadyFlags::from_u32(val & !state_flags::AWAITING_CHANNEL_READY)
						.map(|flags| ChannelState::AwaitingChannelReady(flags))
				} else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY {
					ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY)
						.map(|flags| ChannelState::ChannelReady(flags))
				} else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) {
					Ok(ChannelState::NegotiatingFunding(flags))
				} else {
					Err(())
				}
			},
		}
	}

	fn to_u32(&self) -> u32 {
		match self {
			ChannelState::NegotiatingFunding(flags) => flags.0,
			ChannelState::FundingNegotiated => state_flags::FUNDING_NEGOTIATED,
			ChannelState::AwaitingChannelReady(flags) => state_flags::AWAITING_CHANNEL_READY | flags.0,
			ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0,
			ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE,
		}
	}

	fn is_pre_funded_state(&self) -> bool {
		matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingNegotiated)
	}

	fn is_both_sides_shutdown(&self) -> bool {
		self.is_local_shutdown_sent() && self.is_remote_shutdown_sent()
	}

	fn with_funded_state_flags_mask(&self) -> FundedStateFlags {
		match self {
			ChannelState::AwaitingChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			_ => FundedStateFlags::new(),
		}
	}

	fn can_generate_new_commitment(&self) -> bool {
		match self {
			ChannelState::ChannelReady(flags) =>
				!flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) &&
					!flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) &&
					!flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
			_ => {
				debug_assert!(false, "Can only generate new commitment within ChannelReady");
				false
			},
		}
	}
}
impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected, FUNDED_STATES);
impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress, FUNDED_STATES);
impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent, FUNDED_STATES);
impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent, FUNDED_STATES);
impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready, AwaitingChannelReady);
impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready, AwaitingChannelReady);
impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch, AwaitingChannelReady);
impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke, ChannelReady);
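
// An illustrative round-trip (a sketch, not an LDK test) through the packed `u32` encoding
// used for (de)serialization, plus the commitment-gating rule from `can_generate_new_commitment`.
#[cfg(test)]
mod channel_state_round_trip_example {
	use super::*;

	#[test]
	fn round_trip_and_commitment_gating() {
		let mut state = ChannelState::ChannelReady(ChannelReadyFlags::new());
		assert!(state.can_generate_new_commitment());

		// Once we're waiting on the counterparty's `revoke_and_ack`, no new
		// `commitment_signed` may be generated.
		state.set_awaiting_remote_revoke();
		assert!(!state.can_generate_new_commitment());

		// The flag survives a round-trip through the packed representation.
		assert_eq!(ChannelState::from_u32(state.to_u32()), Ok(state));
	}
}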
pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

pub const DEFAULT_MAX_HTLCS: u16 = 50;

pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
	const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
	const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
	if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
}
#[cfg(not(test))]
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
#[cfg(test)]
pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
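
// A hedged sketch of how these weights feed into a commitment fee; the helper name below is
// illustrative only (LDK's real fee computation lives in the channel logic further down):
// fee_sat = (base_weight + num_nondust_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw / 1000.
#[cfg(test)]
mod commitment_weight_example {
	use super::*;

	// Hypothetical helper mirroring the formula above.
	fn example_commit_tx_fee_sat(feerate_per_kw: u32, num_nondust_htlcs: u64, features: &ChannelTypeFeatures) -> u64 {
		(commitment_tx_base_weight(features) + num_nondust_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC)
			* feerate_per_kw as u64 / 1000
	}

	#[test]
	fn fee_scales_with_htlc_count() {
		let features = ChannelTypeFeatures::only_static_remote_key();
		// 724 weight base plus 172 per HTLC: at 1000 sat/kW, two non-dust HTLCs
		// cost 724 + 2 * 172 = 1068 sats.
		assert_eq!(example_commit_tx_fee_sat(1000, 2, &features), 1068);
	}
}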
pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;

/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;

/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
/// It's 2^24 - 1.
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;

/// Total bitcoin supply in satoshis.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;

/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;

/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;

/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;

// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
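
// A minimal sketch of the bounds these constants imply for a counterparty's
// `dust_limit_satoshis`; `dust_limit_acceptable` is illustrative, not an LDK function.
#[cfg(test)]
mod dust_limit_bounds_example {
	use super::*;

	fn dust_limit_acceptable(proposed_dust_limit_sat: u64) -> bool {
		proposed_dust_limit_sat >= MIN_CHAN_DUST_LIMIT_SATOSHIS
			&& proposed_dust_limit_sat <= MAX_CHAN_DUST_LIMIT_SATOSHIS
	}

	#[test]
	fn bounds() {
		assert!(!dust_limit_acceptable(200)); // below the 354-sat segwit-standardness floor
		assert!(dust_limit_acceptable(546)); // the common implementation default
		assert!(!dust_limit_acceptable(1_000)); // higher than we tolerate from a counterparty
	}
}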
/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
pub(super) enum ChannelError {
	Ignore(String),
	Warn(String),
	Close(String),
}

impl fmt::Debug for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
			&ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
			&ChannelError::Close(ref e) => write!(f, "Close : {}", e),
		}
	}
}

impl fmt::Display for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "{}", e),
			&ChannelError::Warn(ref e) => write!(f, "{}", e),
			&ChannelError::Close(ref e) => write!(f, "{}", e),
		}
	}
}
pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
	pub logger: &'a L,
	pub peer_id: Option<PublicKey>,
	pub channel_id: Option<ChannelId>,
}

impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
	fn log(&self, mut record: Record) {
		record.peer_id = self.peer_id;
		record.channel_id = self.channel_id;
		self.logger.log(record)
	}
}

impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
where L::Target: Logger {
	pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
	where S::Target: SignerProvider
	{
		WithChannelContext {
			logger,
			peer_id: Some(context.counterparty_node_id),
			channel_id: Some(context.channel_id),
		}
	}
}
macro_rules! secp_check {
	($res: expr, $err: expr) => {
		match $res {
			Ok(thing) => thing,
			Err(_) => return Err(ChannelError::Close($err)),
		}
	};
}
931 /// The "channel disabled" bit in channel_update must be set based on whether we are connected to
932 /// our counterparty or not. However, we don't want to announce updates right away to avoid
933 /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
934 /// our channel_update message and track the current state here.
935 /// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
936 #[derive(Clone, Copy, PartialEq)]
937 pub(super) enum ChannelUpdateStatus {
938 /// We've announced the channel as enabled and are connected to our peer.
940 /// Our channel is no longer live, but we haven't announced the channel as disabled yet.
942 /// Our channel is live again, but we haven't announced the channel as enabled yet.
944 /// We've announced the channel as disabled.
/// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
#[derive(Debug, PartialEq)]
pub enum AnnouncementSigsState {
	/// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
	/// we sent the last `AnnouncementSignatures`.
	NotSent,
	/// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
	/// This state never appears on disk - instead we write `NotSent`.
	MessageSent,
	/// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
	/// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
	/// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
	/// they send back a `RevokeAndACK`.
	/// This state never appears on disk - instead we write `NotSent`.
	Committed,
	/// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
	/// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
	PeerReceived,
}
/// An enum indicating whether the local or remote side offered a given HTLC.
enum HTLCInitiator {
	LocalOffered,
	RemoteOffered,
}

/// A struct gathering stats on pending HTLCs, either inbound or outbound side.
struct HTLCStats {
	pending_htlcs: u32,
	pending_htlcs_value_msat: u64,
	on_counterparty_tx_dust_exposure_msat: u64,
	on_holder_tx_dust_exposure_msat: u64,
	holding_cell_msat: u64,
	on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
}
/// A struct gathering stats on a commitment transaction, either local or remote.
struct CommitmentStats<'a> {
	tx: CommitmentTransaction, // the transaction info
	feerate_per_kw: u32, // the feerate included to build the transaction
	total_fee_sat: u64, // the total fee included in the transaction
	num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
	htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
	local_balance_msat: u64, // local balance before fees *not* considering dust limits
	remote_balance_msat: u64, // remote balance before fees *not* considering dust limits
	outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
	inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
}

/// Used when calculating whether we or the remote can afford an additional HTLC.
struct HTLCCandidate {
	amount_msat: u64,
	origin: HTLCInitiator,
}

impl HTLCCandidate {
	fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
		Self {
			amount_msat,
			origin,
		}
	}
}
/// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
/// description.
enum UpdateFulfillFetch {
	NewClaim {
		monitor_update: ChannelMonitorUpdate,
		htlc_value_msat: u64,
		msg: Option<msgs::UpdateFulfillHTLC>,
	},
	DuplicateClaim {},
}

/// The return type of get_update_fulfill_htlc_and_commit.
pub enum UpdateFulfillCommitFetch {
	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
	/// previously placed in the holding cell (and has since been removed).
	NewClaim {
		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
		monitor_update: ChannelMonitorUpdate,
		/// The value of the HTLC which was claimed, in msat.
		htlc_value_msat: u64,
	},
	/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
	/// or has been forgotten (presumably previously claimed).
	DuplicateClaim {},
}
/// The return value of `monitor_updating_restored`
pub(super) struct MonitorRestoreUpdates {
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
	pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	pub finalized_claimed_htlcs: Vec<HTLCSource>,
	pub funding_broadcastable: Option<Transaction>,
	pub channel_ready: Option<msgs::ChannelReady>,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
}

/// The return value of `signer_maybe_unblocked`
pub(super) struct SignerResumeUpdates {
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub funding_signed: Option<msgs::FundingSigned>,
	pub channel_ready: Option<msgs::ChannelReady>,
}

/// The return value of `channel_reestablish`
pub(super) struct ReestablishResponses {
	pub channel_ready: Option<msgs::ChannelReady>,
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
	pub shutdown_msg: Option<msgs::Shutdown>,
}
/// The result of a shutdown that should be handled.
pub(crate) struct ShutdownResult {
	pub(crate) closure_reason: ClosureReason,
	/// A channel monitor update to apply.
	pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelId, ChannelMonitorUpdate)>,
	/// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
	pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	/// An unbroadcasted batch funding transaction id. The closure of this channel should be
	/// propagated to the remainder of the batch.
	pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
	pub(crate) channel_id: ChannelId,
	pub(crate) user_channel_id: u128,
	pub(crate) channel_capacity_satoshis: u64,
	pub(crate) counterparty_node_id: PublicKey,
	pub(crate) unbroadcasted_funding_tx: Option<Transaction>,
	pub(crate) channel_funding_txo: Option<OutPoint>,
}
/// If the majority of the channel's funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
/// balance but the fundee is unable to send a payment as the increase in fee more than drains
/// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
/// Therefore, before sending an HTLC when we are the initiator, we check that the feerate can
/// increase by this multiple without hitting this case, before sending.
/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
/// HTLCs for days we may need this to suffice for feerate increases across days, but that may
/// leave the channel less usable as we hold a bigger reserve.
#[cfg(any(fuzzing, test))]
pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
#[cfg(not(any(fuzzing, test)))]
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
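
// A rough sketch (illustrative numbers, not an LDK test) of the buffer described above:
// before sending an HTLC as initiator, project the commitment fee at a feerate scaled by
// `FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE` and require that it remain affordable.
#[cfg(test)]
mod fee_spike_buffer_example {
	#[test]
	fn projected_fee_with_spike_buffer() {
		// A 724-weight base commitment with one 172-weight non-dust HTLC at 1000 sat/kW
		// costs 896 sats today; the spike-buffered projection doubles the feerate.
		let weight: u64 = 724 + 172;
		let feerate_per_kw: u64 = 1000;
		let current_fee = weight * feerate_per_kw / 1000;
		let buffered_fee = weight * (feerate_per_kw * super::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE) / 1000;
		assert_eq!(current_fee, 896);
		assert_eq!(buffered_fee, 1792);
	}
}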
/// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
/// channel creation on an inbound channel, we simply force-close and move on.
/// This constant is the one suggested in BOLT 2.
pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;

/// In case of a concurrent update_add_htlc proposed by our counterparty, we might
/// not have enough balance value remaining to cover the onchain cost of this new
/// HTLC weight. If this happens, our counterparty fails the reception of our
/// commitment_signed including this new HTLC due to infringement on the channel
/// reserve.
/// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
/// size 2. However, if the number of concurrent update_add_htlc is higher, this still
/// leads to a channel force-close. Ultimately, this is an issue coming from the
/// design of LN state machines, allowing asynchronous updates.
pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
/// When a channel is opened, we check that the funding amount is enough to pay for relevant
/// commitment transaction fees, with at least this many HTLCs present on the commitment
/// transaction (not counting the value of the HTLCs themselves).
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;

/// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
/// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
/// ChannelUpdate prompted by the config update. This value was determined as follows:
///
/// * The expected interval between ticks (1 minute).
/// * The average convergence delay of updates across the network, i.e., ~300 seconds on average
///   for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
/// * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
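
// The derivation above, spelled out (a sketch, not an LDK test): ~300 seconds of expected
// gossip convergence divided by the ~60-second tick interval yields 5 ticks.
#[cfg(test)]
mod expire_prev_config_derivation_example {
	#[test]
	fn derivation_matches_constant() {
		let convergence_delay_secs: usize = 300;
		let tick_interval_secs: usize = 60;
		assert_eq!(convergence_delay_secs / tick_interval_secs, super::EXPIRE_PREV_CONFIG_TICKS);
	}
}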
/// The number of ticks that may elapse while we're waiting for a response to a
/// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
/// them.
///
/// See [`ChannelContext::sent_message_awaiting_response`] for more information.
pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;

/// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
/// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
/// exceeding this age limit will be force-closed and purged from memory.
pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;

/// Number of blocks needed for an output from a coinbase transaction to be spendable.
pub(crate) const COINBASE_MATURITY: u32 = 100;

struct PendingChannelMonitorUpdate {
	update: ChannelMonitorUpdate,
}

impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
	(0, update, required),
});
/// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
/// its variants containing an appropriate channel struct.
pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
	UnfundedOutboundV1(OutboundV1Channel<SP>),
	UnfundedInboundV1(InboundV1Channel<SP>),
	Funded(Channel<SP>),
}

impl<'a, SP: Deref> ChannelPhase<SP> where
	SP::Target: SignerProvider,
	<SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
{
	pub fn context(&'a self) -> &'a ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(chan) => &chan.context,
			ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
			ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
		}
	}

	pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
		}
	}
}
/// Contains all state common to unfunded inbound/outbound channels.
pub(super) struct UnfundedChannelContext {
	/// A counter tracking how many ticks have elapsed since this unfunded channel was
	/// created. If the peer has yet to respond after this unfunded channel reaches
	/// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, it will be force-closed and purged from memory.
	///
	/// This is so that we don't keep channels around that haven't progressed to a funded state
	/// in a timely manner.
	unfunded_channel_age_ticks: usize,
}

impl UnfundedChannelContext {
	/// Determines whether we should force-close and purge this unfunded channel from memory due to it
	/// having reached the unfunded channel age limit.
	///
	/// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
	pub fn should_expire_unfunded_channel(&mut self) -> bool {
		self.unfunded_channel_age_ticks += 1;
		self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
	}
}
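
// A small illustrative test (a sketch): the expiry check above trips exactly when the
// unfunded channel's age reaches `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`.
#[cfg(test)]
mod unfunded_channel_expiry_example {
	use super::*;

	#[test]
	fn expires_at_age_limit() {
		let mut ctx = UnfundedChannelContext { unfunded_channel_age_ticks: 0 };
		for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
			assert!(!ctx.should_expire_unfunded_channel());
		}
		assert!(ctx.should_expire_unfunded_channel());
	}
}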
1210 /// Contains everything about the channel including state, and various flags.
1211 pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
1212 config: LegacyChannelConfig,
1214 // Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
1215 // constructed using it. The second element in the tuple corresponds to the number of ticks that
1216 // have elapsed since the update occurred.
1217 prev_config: Option<(ChannelConfig, usize)>,
1219 inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,
1223 /// The current channel ID.
1224 channel_id: ChannelId,
1225 /// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
1226 /// Will be `None` for channels created prior to 0.0.115.
1227 temporary_channel_id: Option<ChannelId>,
1228 channel_state: ChannelState,
1230 // When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
1231 // our peer. However, we want to make sure they received it, or else rebroadcast it when we
1233 // We do so here, see `AnnouncementSigsSent` for more details on the state(s).
1234 // Note that a number of our tests were written prior to the behavior here which retransmits
1235 // AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
1237 #[cfg(any(test, feature = "_test_utils"))]
1238 pub(crate) announcement_sigs_state: AnnouncementSigsState,
1239 #[cfg(not(any(test, feature = "_test_utils")))]
1240 announcement_sigs_state: AnnouncementSigsState,
1242 secp_ctx: Secp256k1<secp256k1::All>,
1243 channel_value_satoshis: u64,
1245 latest_monitor_update_id: u64,
1247 holder_signer: ChannelSignerType<SP>,
1248 shutdown_scriptpubkey: Option<ShutdownScript>,
1249 destination_script: ScriptBuf,
1251 // Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
1252 // generation start at 0 and count up...this simplifies some parts of implementation at the
1253 // cost of others, but should really just be changed.
1255 cur_holder_commitment_transaction_number: u64,
1256 cur_counterparty_commitment_transaction_number: u64,
1257 value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
1258 pending_inbound_htlcs: Vec<InboundHTLCOutput>,
1259 pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
1260 holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
1262 /// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
1263 /// need to ensure we resend them in the order we originally generated them. Note that because
1264 /// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
1265 /// sufficient to simply set this to the opposite of any message we are generating as we
1266 /// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
/// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
/// resend it first.
1269 resend_order: RAACommitmentOrder,
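// e.g. for `resend_order`: when we generate a CS while an un-ACKed RAA is in flight, this is
// set to RAAFirst, so on reconnect the RAA is retransmitted before the CS.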
1271 monitor_pending_channel_ready: bool,
1272 monitor_pending_revoke_and_ack: bool,
1273 monitor_pending_commitment_signed: bool,
1275 // TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
1276 // responsible for some of the HTLCs here or not - we don't know whether the update in question
1277 // completed or not. We currently ignore these fields entirely when force-closing a channel,
1278 // but need to handle this somehow or we run the risk of losing HTLCs!
1279 monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
1280 monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
1281 monitor_pending_finalized_fulfills: Vec<HTLCSource>,
1283 /// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
1284 /// but our signer (initially) refused to give us a signature, we should retry at some point in
1285 /// the future when the signer indicates it may have a signature for us.
1287 /// This flag is set in such a case. Note that we don't need to persist this as we'll end up
1288 /// setting it again as a side-effect of [`Channel::channel_reestablish`].
1289 signer_pending_commitment_update: bool,
1290 /// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
1291 /// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
1292 /// outbound or inbound.
1293 signer_pending_funding: bool,
1295 // pending_update_fee is filled when sending and receiving update_fee.
1297 // Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
1298 // or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
1299 // generating new commitment transactions with exactly the same criteria as inbound/outbound
1300 // HTLCs with similar state.
1301 pending_update_fee: Option<(u32, FeeUpdateState)>,
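// e.g. for `pending_update_fee`: an inbound fee update sits in `RemoteAnnounced`, then
// `AwaitingRemoteRevokeToAnnounce`, mirroring the matching inbound-HTLC states, and is
// folded into `feerate_per_kw` once fully committed.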
1302 // If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
1303 // it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
1304 // `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
1305 // `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
1306 // further `send_update_fee` calls, dropping the previous holding cell update entirely.
1307 holding_cell_update_fee: Option<u32>,
1308 next_holder_htlc_id: u64,
1309 next_counterparty_htlc_id: u64,
1310 feerate_per_kw: u32,
1312 /// The timestamp set on our latest `channel_update` message for this channel. It is updated
1313 /// when the channel is updated in ways which may impact the `channel_update` message or when a
/// new block is received, ensuring it's always at least moderately close to the current real
/// time.
1316 update_time_counter: u32,
1318 #[cfg(debug_assertions)]
1319 /// Max to_local and to_remote outputs in a locally-generated commitment transaction
1320 holder_max_commitment_tx_output: Mutex<(u64, u64)>,
1321 #[cfg(debug_assertions)]
1322 /// Max to_local and to_remote outputs in a remote-generated commitment transaction
1323 counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,
1325 last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
1326 target_closing_feerate_sats_per_kw: Option<u32>,
1328 /// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
1329 /// update, we need to delay processing it until later. We do that here by simply storing the
1330 /// closing_signed message and handling it in `maybe_propose_closing_signed`.
1331 pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,
1333 /// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
1334 /// transaction. These are set once we reach `closing_negotiation_ready`.
#[cfg(debug_assertions)]
pub(crate) closing_fee_limits: Option<(u64, u64)>,
#[cfg(not(debug_assertions))]
closing_fee_limits: Option<(u64, u64)>,
1340 /// If we remove an HTLC (or fee update), commit, and receive our counterparty's
1341 /// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
1342 /// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
1343 /// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
1344 /// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
1346 /// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
1347 /// until we see a `commitment_signed` before doing so.
1349 /// We don't bother to persist this - we anticipate this state won't last longer than a few
1350 /// milliseconds, so any accidental force-closes here should be exceedingly rare.
1351 expecting_peer_commitment_signed: bool,
1353 /// The hash of the block in which the funding transaction was included.
1354 funding_tx_confirmed_in: Option<BlockHash>,
1355 funding_tx_confirmation_height: u32,
1356 short_channel_id: Option<u64>,
1357 /// Either the height at which this channel was created or the height at which it was last
1358 /// serialized if it was serialized by versions prior to 0.0.103.
1359 /// We use this to close if funding is never broadcasted.
1360 channel_creation_height: u32,
1362 counterparty_dust_limit_satoshis: u64,
#[cfg(test)]
pub(super) holder_dust_limit_satoshis: u64,
#[cfg(not(test))]
holder_dust_limit_satoshis: u64,
#[cfg(test)]
pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
#[cfg(not(test))]
counterparty_max_htlc_value_in_flight_msat: u64,
#[cfg(test)]
pub(super) holder_max_htlc_value_in_flight_msat: u64,
#[cfg(not(test))]
holder_max_htlc_value_in_flight_msat: u64,
/// The minimum channel reserve we are required to maintain, as set by our counterparty.
1380 counterparty_selected_channel_reserve_satoshis: Option<u64>,
#[cfg(any(test, fuzzing))]
pub(super) holder_selected_channel_reserve_satoshis: u64,
#[cfg(not(any(test, fuzzing)))]
holder_selected_channel_reserve_satoshis: u64,
1387 counterparty_htlc_minimum_msat: u64,
1388 holder_htlc_minimum_msat: u64,
#[cfg(test)]
pub counterparty_max_accepted_htlcs: u16,
#[cfg(not(test))]
counterparty_max_accepted_htlcs: u16,
1393 holder_max_accepted_htlcs: u16,
1394 minimum_depth: Option<u32>,
1396 counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
1398 pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
1399 funding_transaction: Option<Transaction>,
1400 is_batch_funding: Option<()>,
1402 counterparty_cur_commitment_point: Option<PublicKey>,
1403 counterparty_prev_commitment_point: Option<PublicKey>,
1404 counterparty_node_id: PublicKey,
1406 counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,
1408 commitment_secrets: CounterpartyCommitmentSecrets,
1410 channel_update_status: ChannelUpdateStatus,
/// Once we reach `closing_negotiation_ready`, we set this, indicating that if the
/// closing_signed negotiation does not complete within a single timer tick (one minute), we
/// should force-close the channel.
/// This prevents us from keeping unusable channels around forever if our counterparty wishes
/// to delay the closing negotiation indefinitely.
1415 /// Note that this field is reset to false on deserialization to give us a chance to connect to
1416 /// our peer and start the closing_signed negotiation fresh.
1417 closing_signed_in_flight: bool,
1419 /// Our counterparty's channel_announcement signatures provided in announcement_signatures.
1420 /// This can be used to rebroadcast the channel_announcement message later.
1421 announcement_sigs: Option<(Signature, Signature)>,
1423 // We save these values so we can make sure `next_local_commit_tx_fee_msat` and
1424 // `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
// be, by comparing the cached values to the fee of the transaction generated by
1426 // `build_commitment_transaction`.
1427 #[cfg(any(test, fuzzing))]
1428 next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
1429 #[cfg(any(test, fuzzing))]
1430 next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
1432 /// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
1433 /// they will not send a channel_reestablish until the channel locks in. Then, they will send a
1434 /// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
1435 /// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
1436 /// message until we receive a channel_reestablish.
1438 /// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
1439 pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,
1441 /// An option set when we wish to track how many ticks have elapsed while waiting for a response
1442 /// from our counterparty after sending a message. If the peer has yet to respond after reaching
1443 /// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
1444 /// unblock the state machine.
/// This behavior is mostly motivated by an lnd bug in which we don't receive a message we expect
1447 /// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
1448 /// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
1450 /// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
1451 /// [`msgs::RevokeAndACK`] message from the counterparty.
1452 sent_message_awaiting_response: Option<usize>,
1454 #[cfg(any(test, fuzzing))]
1455 // When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
1456 // corresponding HTLC on the inbound path. If, then, the outbound path channel is
// disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
1458 // messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
1459 // is fine, but as a sanity check in our failure to generate the second claim, we check here
1460 // that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
1461 historical_inbound_htlc_fulfills: HashSet<u64>,
1463 /// This channel's type, as negotiated during channel open
1464 channel_type: ChannelTypeFeatures,
1466 // Our counterparty can offer us SCID aliases which they will map to this channel when routing
1467 // outbound payments. These can be used in invoice route hints to avoid explicitly revealing
1468 // the channel's funding UTXO.
1470 // We also use this when sending our peer a channel_update that isn't to be broadcasted
1471 // publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
1472 // associated channel mapping.
1474 // We only bother storing the most recent SCID alias at any time, though our counterparty has
1475 // to store all of them.
1476 latest_inbound_scid_alias: Option<u64>,
1478 // We always offer our counterparty a static SCID alias, which we recognize as for this channel
1479 // if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
1480 // don't currently support node id aliases and eventually privacy should be provided with
1481 // blinded paths instead of simple scid+node_id aliases.
1482 outbound_scid_alias: u64,
1484 // We track whether we already emitted a `ChannelPending` event.
1485 channel_pending_event_emitted: bool,
1487 // We track whether we already emitted a `ChannelReady` event.
1488 channel_ready_event_emitted: bool,
/// Set to `Some(())` if we initiated the channel shutdown.
1491 local_initiated_shutdown: Option<()>,
1493 /// The unique identifier used to re-derive the private key material for the channel through
1494 /// [`SignerProvider::derive_channel_signer`].
#[cfg(not(test))]
channel_keys_id: [u8; 32],
#[cfg(test)]
pub channel_keys_id: [u8; 32],
1500 /// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
1501 /// store it here and only release it to the `ChannelManager` once it asks for it.
1502 blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
1505 impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
1506 fn new_for_inbound_channel<'a, ES: Deref, F: Deref, L: Deref>(
1507 fee_estimator: &'a LowerBoundedFeeEstimator<F>,
1508 entropy_source: &'a ES,
1509 signer_provider: &'a SP,
1510 counterparty_node_id: PublicKey,
1511 their_features: &'a InitFeatures,
user_id: u128,
config: &'a UserConfig,
1514 current_chain_height: u32,
logger: &'a L,
is_0conf: bool,
our_funding_satoshis: u64,
1518 counterparty_pubkeys: ChannelPublicKeys,
1519 channel_type: ChannelTypeFeatures,
1520 holder_selected_channel_reserve_satoshis: u64,
1521 msg_channel_reserve_satoshis: u64,
msg_push_msat: u64,
open_channel_fields: msgs::CommonOpenChannelFields,
1524 ) -> Result<ChannelContext<SP>, ChannelError>
where
ES::Target: EntropySource,
1527 F::Target: FeeEstimator,
L::Target: Logger,
SP::Target: SignerProvider,
{
1531 let logger = WithContext::from(logger, Some(counterparty_node_id), Some(open_channel_fields.temporary_channel_id));
let announced_channel = (open_channel_fields.channel_flags & 1) == 1;
1534 let channel_value_satoshis = our_funding_satoshis.saturating_add(open_channel_fields.funding_satoshis);
1536 let channel_keys_id = signer_provider.generate_channel_keys_id(true, channel_value_satoshis, user_id);
1537 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
1538 let pubkeys = holder_signer.pubkeys().clone();
1540 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
1541 return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
1544 // Check sanity of message fields:
1545 if channel_value_satoshis > config.channel_handshake_limits.max_funding_satoshis {
1546 return Err(ChannelError::Close(format!(
1547 "Per our config, funding must be at most {}. It was {}. Peer contribution: {}. Our contribution: {}",
1548 config.channel_handshake_limits.max_funding_satoshis, channel_value_satoshis,
1549 open_channel_fields.funding_satoshis, our_funding_satoshis)));
1551 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
1552 return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", channel_value_satoshis)));
1554 if msg_channel_reserve_satoshis > channel_value_satoshis {
1555 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be no greater than channel_value_satoshis: {}", msg_channel_reserve_satoshis, channel_value_satoshis)));
1557 let full_channel_value_msat = (channel_value_satoshis - msg_channel_reserve_satoshis) * 1000;
1558 if msg_push_msat > full_channel_value_msat {
1559 return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg_push_msat, full_channel_value_msat)));
1561 if open_channel_fields.dust_limit_satoshis > channel_value_satoshis {
1562 return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than channel_value_satoshis {}. Peer never wants payout outputs?", open_channel_fields.dust_limit_satoshis, channel_value_satoshis)));
1564 if open_channel_fields.htlc_minimum_msat >= full_channel_value_msat {
1565 return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", open_channel_fields.htlc_minimum_msat, full_channel_value_msat)));
1567 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, open_channel_fields.commitment_feerate_sat_per_1000_weight, None, &&logger)?;
1569 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
1570 if open_channel_fields.to_self_delay > max_counterparty_selected_contest_delay {
1571 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, open_channel_fields.to_self_delay)));
1573 if open_channel_fields.max_accepted_htlcs < 1 {
1574 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
1576 if open_channel_fields.max_accepted_htlcs > MAX_HTLCS {
1577 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", open_channel_fields.max_accepted_htlcs, MAX_HTLCS)));
1580 // Now check against optional parameters as set by config...
1581 if channel_value_satoshis < config.channel_handshake_limits.min_funding_satoshis {
1582 return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", channel_value_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
1584 if open_channel_fields.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
1585 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", open_channel_fields.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
1587 if open_channel_fields.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
1588 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", open_channel_fields.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
1590 if msg_channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
1591 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg_channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
1593 if open_channel_fields.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
1594 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", open_channel_fields.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
1596 if open_channel_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
1597 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
1599 if open_channel_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
1600 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
1603 // Convert things into internal flags and prep our state:
1605 if config.channel_handshake_limits.force_announced_channel_preference {
1606 if config.channel_handshake_config.announced_channel != announced_channel {
1607 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
1611 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
// Protocol-level safety check in place; this should never happen because of
// `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
1614 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
1616 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
1617 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg_push_msat)));
1619 if msg_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
1620 log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
1621 msg_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
1623 if holder_selected_channel_reserve_satoshis < open_channel_fields.dust_limit_satoshis {
1624 return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", open_channel_fields.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
1627 // check if the funder's amount for the initial commitment tx is sufficient
1628 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
1629 let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
ANCHOR_OUTPUT_VALUE_SATOSHI * 2
} else {
0
};
1634 let funders_amount_msat = open_channel_fields.funding_satoshis * 1000 - msg_push_msat;
1635 let commitment_tx_fee = commit_tx_fee_msat(open_channel_fields.commitment_feerate_sat_per_1000_weight, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
1636 if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
1637 return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
1640 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
1641 // While it's reasonable for us to not meet the channel reserve initially (if they don't
1642 // want to push much to us), our counterparty should always have more than our reserve.
1643 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
1644 return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
1647 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
1648 match &open_channel_fields.shutdown_scriptpubkey {
1649 &Some(ref script) => {
1650 // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
if script.len() == 0 {
None
} else {
1654 if !script::is_bolt2_compliant(&script, their_features) {
1655 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
}
Some(script.clone())
}
},
// Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (i.e. a 0-length script). The peer looks buggy, so we fail the channel.
&None => {
return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
}
}
} else { None };
1667 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
1668 match signer_provider.get_shutdown_scriptpubkey() {
1669 Ok(scriptpubkey) => Some(scriptpubkey),
Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
}
} else { None };
1674 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
1675 if !shutdown_scriptpubkey.is_compatible(&their_features) {
1676 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
1680 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
1681 Ok(script) => script,
1682 Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
1685 let mut secp_ctx = Secp256k1::new();
1686 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
let minimum_depth = if is_0conf {
Some(0)
} else {
Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
};
1694 let value_to_self_msat = our_funding_satoshis * 1000 + msg_push_msat;
1696 // TODO(dual_funding): Checks for `funding_feerate_sat_per_1000_weight`?
1698 let channel_context = ChannelContext {
1701 config: LegacyChannelConfig {
1702 options: config.channel_config.clone(),
1704 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
1709 inbound_handshake_limits_override: None,
1711 temporary_channel_id: Some(open_channel_fields.temporary_channel_id),
1712 channel_id: open_channel_fields.temporary_channel_id,
1713 channel_state: ChannelState::NegotiatingFunding(
1714 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
1716 announcement_sigs_state: AnnouncementSigsState::NotSent,
1719 latest_monitor_update_id: 0,
1721 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
1722 shutdown_scriptpubkey,
1725 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
1726 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
1729 pending_inbound_htlcs: Vec::new(),
1730 pending_outbound_htlcs: Vec::new(),
1731 holding_cell_htlc_updates: Vec::new(),
1732 pending_update_fee: None,
1733 holding_cell_update_fee: None,
1734 next_holder_htlc_id: 0,
1735 next_counterparty_htlc_id: 0,
1736 update_time_counter: 1,
1738 resend_order: RAACommitmentOrder::CommitmentFirst,
1740 monitor_pending_channel_ready: false,
1741 monitor_pending_revoke_and_ack: false,
1742 monitor_pending_commitment_signed: false,
1743 monitor_pending_forwards: Vec::new(),
1744 monitor_pending_failures: Vec::new(),
1745 monitor_pending_finalized_fulfills: Vec::new(),
1747 signer_pending_commitment_update: false,
1748 signer_pending_funding: false,
1751 #[cfg(debug_assertions)]
1752 holder_max_commitment_tx_output: Mutex::new((value_to_self_msat, (channel_value_satoshis * 1000 - msg_push_msat).saturating_sub(value_to_self_msat))),
1753 #[cfg(debug_assertions)]
1754 counterparty_max_commitment_tx_output: Mutex::new((value_to_self_msat, (channel_value_satoshis * 1000 - msg_push_msat).saturating_sub(value_to_self_msat))),
1756 last_sent_closing_fee: None,
1757 pending_counterparty_closing_signed: None,
1758 expecting_peer_commitment_signed: false,
1759 closing_fee_limits: None,
1760 target_closing_feerate_sats_per_kw: None,
1762 funding_tx_confirmed_in: None,
1763 funding_tx_confirmation_height: 0,
1764 short_channel_id: None,
1765 channel_creation_height: current_chain_height,
1767 feerate_per_kw: open_channel_fields.commitment_feerate_sat_per_1000_weight,
1768 channel_value_satoshis,
1769 counterparty_dust_limit_satoshis: open_channel_fields.dust_limit_satoshis,
1770 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
1771 counterparty_max_htlc_value_in_flight_msat: cmp::min(open_channel_fields.max_htlc_value_in_flight_msat, channel_value_satoshis * 1000),
1772 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
1773 counterparty_selected_channel_reserve_satoshis: Some(msg_channel_reserve_satoshis),
1774 holder_selected_channel_reserve_satoshis,
1775 counterparty_htlc_minimum_msat: open_channel_fields.htlc_minimum_msat,
1776 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
1777 counterparty_max_accepted_htlcs: open_channel_fields.max_accepted_htlcs,
1778 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
1781 counterparty_forwarding_info: None,
1783 channel_transaction_parameters: ChannelTransactionParameters {
1784 holder_pubkeys: pubkeys,
1785 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
1786 is_outbound_from_holder: false,
1787 counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
1788 selected_contest_delay: open_channel_fields.to_self_delay,
1789 pubkeys: counterparty_pubkeys,
1791 funding_outpoint: None,
1792 channel_type_features: channel_type.clone()
1794 funding_transaction: None,
1795 is_batch_funding: None,
1797 counterparty_cur_commitment_point: Some(open_channel_fields.first_per_commitment_point),
1798 counterparty_prev_commitment_point: None,
1799 counterparty_node_id,
1801 counterparty_shutdown_scriptpubkey,
1803 commitment_secrets: CounterpartyCommitmentSecrets::new(),
1805 channel_update_status: ChannelUpdateStatus::Enabled,
1806 closing_signed_in_flight: false,
1808 announcement_sigs: None,
1810 #[cfg(any(test, fuzzing))]
1811 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
1812 #[cfg(any(test, fuzzing))]
1813 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
1815 workaround_lnd_bug_4006: None,
1816 sent_message_awaiting_response: None,
1818 latest_inbound_scid_alias: None,
1819 outbound_scid_alias: 0,
1821 channel_pending_event_emitted: false,
1822 channel_ready_event_emitted: false,
1824 #[cfg(any(test, fuzzing))]
1825 historical_inbound_htlc_fulfills: new_hash_set(),
1830 local_initiated_shutdown: None,
1832 blocked_monitor_updates: Vec::new(),
1838 fn new_for_outbound_channel<'a, ES: Deref, F: Deref>(
1839 fee_estimator: &'a LowerBoundedFeeEstimator<F>,
1840 entropy_source: &'a ES,
1841 signer_provider: &'a SP,
1842 counterparty_node_id: PublicKey,
1843 their_features: &'a InitFeatures,
1844 funding_satoshis: u64,
push_msat: u64,
user_id: u128,
config: &'a UserConfig,
1848 current_chain_height: u32,
1849 outbound_scid_alias: u64,
1850 temporary_channel_id: Option<ChannelId>,
1851 holder_selected_channel_reserve_satoshis: u64,
1852 channel_keys_id: [u8; 32],
1853 holder_signer: <SP::Target as SignerProvider>::EcdsaSigner,
1854 pubkeys: ChannelPublicKeys,
1855 ) -> Result<ChannelContext<SP>, APIError>
where
ES::Target: EntropySource,
1858 F::Target: FeeEstimator,
SP::Target: SignerProvider,
{
1861 // This will be updated with the counterparty contribution if this is a dual-funded channel
1862 let channel_value_satoshis = funding_satoshis;
1864 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
1866 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
1867 return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
1869 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
1870 return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
1872 let channel_value_msat = channel_value_satoshis * 1000;
1873 if push_msat > channel_value_msat {
1874 return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
1876 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk", holder_selected_contest_delay)});
1880 let channel_type = get_initial_channel_type(&config, their_features);
1881 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
1883 let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
1884 (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
} else {
(ConfirmationTarget::NonAnchorChannelFee, 0)
};
1888 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
1890 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
1891 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
1892 if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay the initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
1896 let mut secp_ctx = Secp256k1::new();
1897 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
1899 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
1900 match signer_provider.get_shutdown_scriptpubkey() {
1901 Ok(scriptpubkey) => Some(scriptpubkey),
Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
}
} else { None };
1906 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
1907 if !shutdown_scriptpubkey.is_compatible(&their_features) {
1908 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
1912 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
1913 Ok(script) => script,
1914 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
1917 let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
1922 config: LegacyChannelConfig {
1923 options: config.channel_config.clone(),
1924 announced_channel: config.channel_handshake_config.announced_channel,
1925 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
1930 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
1932 channel_id: temporary_channel_id,
1933 temporary_channel_id: Some(temporary_channel_id),
1934 channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
1935 announcement_sigs_state: AnnouncementSigsState::NotSent,
1937 // We'll add our counterparty's `funding_satoshis` when we receive `accept_channel2`.
1938 channel_value_satoshis,
1940 latest_monitor_update_id: 0,
1942 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
1943 shutdown_scriptpubkey,
1946 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
1947 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
1950 pending_inbound_htlcs: Vec::new(),
1951 pending_outbound_htlcs: Vec::new(),
1952 holding_cell_htlc_updates: Vec::new(),
1953 pending_update_fee: None,
1954 holding_cell_update_fee: None,
1955 next_holder_htlc_id: 0,
1956 next_counterparty_htlc_id: 0,
1957 update_time_counter: 1,
1959 resend_order: RAACommitmentOrder::CommitmentFirst,
1961 monitor_pending_channel_ready: false,
1962 monitor_pending_revoke_and_ack: false,
1963 monitor_pending_commitment_signed: false,
1964 monitor_pending_forwards: Vec::new(),
1965 monitor_pending_failures: Vec::new(),
1966 monitor_pending_finalized_fulfills: Vec::new(),
1968 signer_pending_commitment_update: false,
1969 signer_pending_funding: false,
1971 // We'll add our counterparty's `funding_satoshis` to these max commitment output assertions
1972 // when we receive `accept_channel2`.
1973 #[cfg(debug_assertions)]
1974 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
1975 #[cfg(debug_assertions)]
1976 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
1978 last_sent_closing_fee: None,
1979 pending_counterparty_closing_signed: None,
1980 expecting_peer_commitment_signed: false,
1981 closing_fee_limits: None,
1982 target_closing_feerate_sats_per_kw: None,
1984 funding_tx_confirmed_in: None,
1985 funding_tx_confirmation_height: 0,
1986 short_channel_id: None,
1987 channel_creation_height: current_chain_height,
1989 feerate_per_kw: commitment_feerate,
1990 counterparty_dust_limit_satoshis: 0,
1991 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
1992 counterparty_max_htlc_value_in_flight_msat: 0,
1993 // We'll adjust this to include our counterparty's `funding_satoshis` when we
1994 // receive `accept_channel2`.
1995 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
1996 counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
1997 holder_selected_channel_reserve_satoshis,
1998 counterparty_htlc_minimum_msat: 0,
1999 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
2000 counterparty_max_accepted_htlcs: 0,
2001 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
2002 minimum_depth: None, // Filled in in accept_channel
2004 counterparty_forwarding_info: None,
2006 channel_transaction_parameters: ChannelTransactionParameters {
2007 holder_pubkeys: pubkeys,
2008 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
2009 is_outbound_from_holder: true,
2010 counterparty_parameters: None,
2011 funding_outpoint: None,
2012 channel_type_features: channel_type.clone()
2014 funding_transaction: None,
2015 is_batch_funding: None,
2017 counterparty_cur_commitment_point: None,
2018 counterparty_prev_commitment_point: None,
2019 counterparty_node_id,
2021 counterparty_shutdown_scriptpubkey: None,
2023 commitment_secrets: CounterpartyCommitmentSecrets::new(),
2025 channel_update_status: ChannelUpdateStatus::Enabled,
2026 closing_signed_in_flight: false,
2028 announcement_sigs: None,
2030 #[cfg(any(test, fuzzing))]
2031 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
2032 #[cfg(any(test, fuzzing))]
2033 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
2035 workaround_lnd_bug_4006: None,
2036 sent_message_awaiting_response: None,
2038 latest_inbound_scid_alias: None,
2039 outbound_scid_alias,
2041 channel_pending_event_emitted: false,
2042 channel_ready_event_emitted: false,
2044 #[cfg(any(test, fuzzing))]
2045 historical_inbound_htlc_fulfills: new_hash_set(),
2050 blocked_monitor_updates: Vec::new(),
2051 local_initiated_shutdown: None,
2055 /// Allowed in any state (including after shutdown)
2056 pub fn get_update_time_counter(&self) -> u32 {
2057 self.update_time_counter
2060 pub fn get_latest_monitor_update_id(&self) -> u64 {
2061 self.latest_monitor_update_id
2064 pub fn should_announce(&self) -> bool {
2065 self.config.announced_channel
2068 pub fn is_outbound(&self) -> bool {
2069 self.channel_transaction_parameters.is_outbound_from_holder
2072 /// Gets the fee we'd want to charge for adding an HTLC output to this Channel
2073 /// Allowed in any state (including after shutdown)
2074 pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
2075 self.config.options.forwarding_fee_base_msat
2078 /// Returns true if we've ever received a message from the remote end for this Channel
2079 pub fn have_received_message(&self) -> bool {
2080 self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
2083 /// Returns true if this channel is fully established and not known to be closing.
2084 /// Allowed in any state (including after shutdown)
2085 pub fn is_usable(&self) -> bool {
2086 matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
2087 !self.channel_state.is_local_shutdown_sent() &&
2088 !self.channel_state.is_remote_shutdown_sent() &&
2089 !self.monitor_pending_channel_ready
/// Returns the channel's state as it progresses through the various stages of shutdown.
2093 pub fn shutdown_state(&self) -> ChannelShutdownState {
2094 match self.channel_state {
2095 ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
2096 if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
2097 ChannelShutdownState::ShutdownInitiated
2098 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
2099 ChannelShutdownState::ResolvingHTLCs
2100 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
2101 ChannelShutdownState::NegotiatingClosingFee
} else {
ChannelShutdownState::NotShuttingDown
},
2105 ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
2106 _ => ChannelShutdownState::NotShuttingDown,
2110 fn closing_negotiation_ready(&self) -> bool {
2111 let is_ready_to_close = match self.channel_state {
2112 ChannelState::AwaitingChannelReady(flags) =>
2113 flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
2114 ChannelState::ChannelReady(flags) =>
flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
_ => false,
};
self.pending_inbound_htlcs.is_empty() &&
self.pending_outbound_htlcs.is_empty() &&
self.pending_update_fee.is_none() &&
is_ready_to_close
2124 /// Returns true if this channel is currently available for use. This is a superset of
2125 /// is_usable() and considers things like the channel being temporarily disabled.
2126 /// Allowed in any state (including after shutdown)
2127 pub fn is_live(&self) -> bool {
2128 self.is_usable() && !self.channel_state.is_peer_disconnected()
2131 // Public utilities:
pub fn channel_id(&self) -> ChannelId {
self.channel_id
}
/// Returns the `temporary_channel_id` used during channel establishment.
///
/// Will return `None` for channels created prior to LDK version 0.0.115.
2140 pub fn temporary_channel_id(&self) -> Option<ChannelId> {
2141 self.temporary_channel_id
pub fn minimum_depth(&self) -> Option<u32> {
self.minimum_depth
}
2148 /// Gets the "user_id" value passed into the construction of this channel. It has no special
2149 /// meaning and exists only to allow users to have a persistent identifier of a channel.
pub fn get_user_id(&self) -> u128 {
self.user_id
}
2154 /// Gets the channel's type
pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
&self.channel_type
}
2159 /// Gets the channel's `short_channel_id`.
2161 /// Will return `None` if the channel hasn't been confirmed yet.
2162 pub fn get_short_channel_id(&self) -> Option<u64> {
2163 self.short_channel_id
2166 /// Allowed in any state (including after shutdown)
2167 pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
2168 self.latest_inbound_scid_alias
2171 /// Allowed in any state (including after shutdown)
2172 pub fn outbound_scid_alias(&self) -> u64 {
2173 self.outbound_scid_alias
2176 /// Returns the holder signer for this channel.
2178 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
&self.holder_signer
/// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
/// indicating we were written by LDK prior to 0.0.106 (which did not set outbound SCID
/// aliases), or prior to any channel actions during `Channel` initialization.
2185 pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
2186 debug_assert_eq!(self.outbound_scid_alias, 0);
2187 self.outbound_scid_alias = outbound_scid_alias;
2190 /// Returns the funding_txo we either got from our peer, or were given by
2191 /// get_funding_created.
2192 pub fn get_funding_txo(&self) -> Option<OutPoint> {
2193 self.channel_transaction_parameters.funding_outpoint
2196 /// Returns the height in which our funding transaction was confirmed.
2197 pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
2198 let conf_height = self.funding_tx_confirmation_height;
if conf_height > 0 {
Some(conf_height)
} else {
None
}
2206 /// Returns the block hash in which our funding transaction was confirmed.
2207 pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
2208 self.funding_tx_confirmed_in
2211 /// Returns the current number of confirmations on the funding transaction.
2212 pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
2213 if self.funding_tx_confirmation_height == 0 {
// We either haven't seen any confirmation yet, or observed a reorg.
return 0;
}
2218 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
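// e.g. if the funding transaction confirmed at height 100 and `height` is 105, this
// returns 105 - 100 + 1 = 6, since the confirming block itself counts as one confirmation.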
2221 fn get_holder_selected_contest_delay(&self) -> u16 {
2222 self.channel_transaction_parameters.holder_selected_contest_delay
2225 fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
2226 &self.channel_transaction_parameters.holder_pubkeys
2229 pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
2230 self.channel_transaction_parameters.counterparty_parameters
2231 .as_ref().map(|params| params.selected_contest_delay)
2234 fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
2235 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
2238 /// Allowed in any state (including after shutdown)
2239 pub fn get_counterparty_node_id(&self) -> PublicKey {
2240 self.counterparty_node_id
2243 /// Allowed in any state (including after shutdown)
2244 pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
2245 self.holder_htlc_minimum_msat
/// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent.
2249 pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
2250 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
2253 /// Allowed in any state (including after shutdown)
2254 pub fn get_announced_htlc_max_msat(&self) -> u64 {
return cmp::min(
// Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
// to use full capacity. This is an effort to reduce routing failures, because in many cases
// the channel might have been used to route very small values (either by honest users or as DoS).
self.channel_value_satoshis * 1000 * 9 / 10,
self.counterparty_max_htlc_value_in_flight_msat
);
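// e.g. (illustrative): a 1_000_000 sat channel is announced with at most
// 1_000_000 * 1000 * 9 / 10 = 900_000_000 msat, unless the counterparty's
// max_htlc_value_in_flight_msat is lower still.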
2265 /// Allowed in any state (including after shutdown)
2266 pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
2267 self.counterparty_htlc_minimum_msat
/// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent.
2271 pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
2272 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
2275 fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
2276 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
2277 let holder_reserve = self.holder_selected_channel_reserve_satoshis;
cmp::min(
(self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
party_max_htlc_value_in_flight_msat
)
})
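// e.g. (illustrative): with a 1_000_000 sat channel and 10_000 sat reserves on each side,
// the spendable bound is (1_000_000 - 10_000 - 10_000) * 1000 = 980_000_000 msat, further
// capped by the party's max_htlc_value_in_flight_msat.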
2285 pub fn get_value_satoshis(&self) -> u64 {
2286 self.channel_value_satoshis
2289 pub fn get_fee_proportional_millionths(&self) -> u32 {
2290 self.config.options.forwarding_fee_proportional_millionths
2293 pub fn get_cltv_expiry_delta(&self) -> u16 {
2294 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
2297 pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
2298 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
2299 where F::Target: FeeEstimator
2301 match self.config.options.max_dust_htlc_exposure {
2302 MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
2303 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
2304 ConfirmationTarget::OnChainSweep) as u64;
2305 feerate_per_kw.saturating_mul(multiplier)
2307 MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
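// e.g. (illustrative values): with MaxDustHTLCExposure::FeeRateMultiplier(10_000) and an
// estimated sweep feerate of 2_500 sat/kW, the permitted dust exposure is
// 2_500 * 10_000 = 25_000_000 msat.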
2311 /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
2312 pub fn prev_config(&self) -> Option<ChannelConfig> {
2313 self.prev_config.map(|prev_config| prev_config.0)
2316 // Checks whether we should emit a `ChannelPending` event.
2317 pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
2318 self.is_funding_broadcast() && !self.channel_pending_event_emitted
2321 // Returns whether we already emitted a `ChannelPending` event.
2322 pub(crate) fn channel_pending_event_emitted(&self) -> bool {
2323 self.channel_pending_event_emitted
2326 // Remembers that we already emitted a `ChannelPending` event.
2327 pub(crate) fn set_channel_pending_event_emitted(&mut self) {
2328 self.channel_pending_event_emitted = true;
2331 // Checks whether we should emit a `ChannelReady` event.
2332 pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
2333 self.is_usable() && !self.channel_ready_event_emitted
2336 // Remembers that we already emitted a `ChannelReady` event.
2337 pub(crate) fn set_channel_ready_event_emitted(&mut self) {
2338 self.channel_ready_event_emitted = true;
2341 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
2342 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
2343 /// no longer be considered when forwarding HTLCs.
2344 pub fn maybe_expire_prev_config(&mut self) {
if self.prev_config.is_none() {
return;
}
let prev_config = self.prev_config.as_mut().unwrap();
prev_config.1 += 1;
2350 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
2351 self.prev_config = None;
2355 /// Returns the current [`ChannelConfig`] applied to the channel.
pub fn config(&self) -> ChannelConfig {
self.config.options
}
/// Updates the channel's config. Returns a bool indicating whether the applied config update
/// resulted in a new `ChannelUpdate` message.
2362 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
2363 let did_channel_update =
2364 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
2365 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
2366 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
2367 if did_channel_update {
2368 self.prev_config = Some((self.config.options, 0));
2369 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
2370 // policy change to propagate throughout the network.
2371 self.update_time_counter += 1;
self.config.options = *config;
did_channel_update
}
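// Usage sketch (illustrative): when this returns true the caller should broadcast a fresh
// `channel_update`; HTLCs routed under the old policy keep forwarding because the previous
// options are retained in `prev_config` until they expire.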
2377 /// Returns true if funding_signed was sent/received and the
2378 /// funding transaction has been broadcast if necessary.
2379 pub fn is_funding_broadcast(&self) -> bool {
2380 !self.channel_state.is_pre_funded_state() &&
2381 !matches!(self.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH))
2384 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
2385 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
2386 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
2387 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
/// such, a transaction is generally the result of b increasing the amount paid to a (or adding
/// an HTLC to it).
2390 /// @local is used only to convert relevant internal structures which refer to remote vs local
2391 /// to decide value of outputs and direction of HTLCs.
2392 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
2393 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
2394 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
2395 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
2396 /// which peer generated this transaction and "to whom" this transaction flows.
2398 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
2399 where L::Target: Logger
2401 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
2402 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
2403 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
2405 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
2406 let mut remote_htlc_total_msat = 0;
2407 let mut local_htlc_total_msat = 0;
2408 let mut value_to_self_msat_offset = 0;
2410 let mut feerate_per_kw = self.feerate_per_kw;
2411 if let Some((feerate, update_state)) = self.pending_update_fee {
2412 if match update_state {
2413 // Note that these match the inclusion criteria when scanning
2414 // pending_inbound_htlcs below.
2415 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
2416 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
2417 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
} {
feerate_per_kw = feerate;
}
}
2423 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
2424 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
2425 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
&self.channel_id,
if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
2429 macro_rules! get_htlc_in_commitment {
2430 ($htlc: expr, $offered: expr) => {
2431 HTLCOutputInCommitment {
offered: $offered,
amount_msat: $htlc.amount_msat,
2434 cltv_expiry: $htlc.cltv_expiry,
2435 payment_hash: $htlc.payment_hash,
2436 transaction_output_index: None
2441 macro_rules! add_htlc_output {
2442 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
2443 if $outbound == local { // "offered HTLC output"
2444 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
2445 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
0
} else {
feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
};
2450 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
2451 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
2452 included_non_dust_htlcs.push((htlc_in_tx, $source));
} else {
log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
2455 included_dust_htlcs.push((htlc_in_tx, $source));
}
} else {
let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
2459 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
0
} else {
feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
};
2464 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
2465 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
2466 included_non_dust_htlcs.push((htlc_in_tx, $source));
} else {
log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
2469 included_dust_htlcs.push((htlc_in_tx, $source));
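// Illustrative dust math (non-anchor weights assumed from BOLT 3): at 5_000 sat/kW an
// offered HTLC pays a timeout-tx fee of 663 * 5_000 / 1000 = 3_315 sats, so with a
// 546 sat broadcaster dust limit any HTLC below 3_861 sats lands in `included_dust_htlcs`.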
2475 let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
2477 for ref htlc in self.pending_inbound_htlcs.iter() {
2478 let (include, state_name) = match htlc.state {
2479 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
2480 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
2481 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
2482 InboundHTLCState::Committed => (true, "Committed"),
2483 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
};
if include {
add_htlc_output!(htlc, false, None, state_name);
remote_htlc_total_msat += htlc.amount_msat;
} else {
log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
match &htlc.state {
&InboundHTLCState::LocalRemoved(ref reason) => {
2493 if generated_by_local {
2494 if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
2495 inbound_htlc_preimages.push(preimage);
2496 value_to_self_msat_offset += htlc.amount_msat as i64;
2506 let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
2508 for ref htlc in self.pending_outbound_htlcs.iter() {
2509 let (include, state_name) = match htlc.state {
2510 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
2511 OutboundHTLCState::Committed => (true, "Committed"),
2512 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
2513 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
2514 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
};
let preimage_opt = match htlc.state {
2518 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
2519 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
2520 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
2524 if let Some(preimage) = preimage_opt {
2525 outbound_htlc_preimages.push(preimage);
2529 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
2530 local_htlc_total_msat += htlc.amount_msat;
2532 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
2534 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
2535 value_to_self_msat_offset -= htlc.amount_msat as i64;
2537 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
2538 if !generated_by_local {
2539 value_to_self_msat_offset -= htlc.amount_msat as i64;
2547 let value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
2548 assert!(value_to_self_msat >= 0);
2549 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
2550 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
2551 // "violate" their reserve value by couting those against it. Thus, we have to convert
2552 // everything to i64 before subtracting as otherwise we can overflow.
2553 let value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
2554 assert!(value_to_remote_msat >= 0);
2556 #[cfg(debug_assertions)]
2558 // Make sure that the to_self/to_remote is always either past the appropriate
2559 // channel_reserve *or* it is making progress towards it.
2560 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
2561 self.holder_max_commitment_tx_output.lock().unwrap()
2563 self.counterparty_max_commitment_tx_output.lock().unwrap()
2565 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
2566 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
2567 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
2568 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
2571 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
2572 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
2573 let (value_to_self, value_to_remote) = if self.is_outbound() {
2574 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
2576 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
2579 let mut value_to_a = if local { value_to_self } else { value_to_remote };
2580 let mut value_to_b = if local { value_to_remote } else { value_to_self };
2581 let (funding_pubkey_a, funding_pubkey_b) = if local {
2582 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
2584 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
2587 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
2588 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
2593 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
2594 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
2599 let num_nondust_htlcs = included_non_dust_htlcs.len();
2601 let channel_parameters =
2602 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
2603 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
2604 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
2611 &mut included_non_dust_htlcs,
2614 let mut htlcs_included = included_non_dust_htlcs;
2615 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
2616 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
2617 htlcs_included.append(&mut included_dust_htlcs);
2625 local_balance_msat: value_to_self_msat as u64,
2626 remote_balance_msat: value_to_remote_msat as u64,
2627 inbound_htlc_preimages,
2628 outbound_htlc_preimages,
2633 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
2634 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
2635 /// our counterparty!)
2636 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
2637 /// TODO Some magic rust shit to compile-time check this?
2638 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
2639 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
2640 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
2641 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
2642 let counterparty_pubkeys = self.get_counterparty_pubkeys();
2644 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
2648 /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
2649 /// will sign and send to our counterparty.
2651 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
2652 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
2653 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
2654 let counterparty_pubkeys = self.get_counterparty_pubkeys();
2656 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
2659 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
2660 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
2661 /// Panics if called before accept_channel/InboundV1Channel::new
2662 pub fn get_funding_redeemscript(&self) -> ScriptBuf {
2663 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
2666 fn counterparty_funding_pubkey(&self) -> &PublicKey {
2667 &self.get_counterparty_pubkeys().funding_pubkey
2670 pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
2674 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
2675 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
2676 // may, at any point, increase by at least 10 sat/vB (i.e. 2530 sat/kWU) or 25%,
2677 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
2678 // more dust balance if the feerate increases when we have several HTLCs pending
2679 // which are near the dust limit.
2680 let mut feerate_per_kw = self.feerate_per_kw;
2681 // If there's a pending update fee, use it to ensure we aren't under-estimating
2682 // potential feerate updates coming soon.
2683 if let Some((feerate, _)) = self.pending_update_fee {
2684 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
2686 if let Some(feerate) = outbound_feerate_update {
2687 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
2689 let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
2690 cmp::max(2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
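// A minimal, test-only numeric sketch (not part of the channel API) of the buffering
// rule above: the buffered feerate is the greater of a flat 2530 sat/kWU and 125% of
// the current feerate.
#[cfg(test)]
fn dust_buffer_feerate_sketch() {
	// At a low 1_000 sat/kWU the flat floor dominates: max(2530, 1_250) = 2530.
	assert_eq!(cmp::max(2530u32, 1_000u32 * 1250 / 1000), 2530);
	// At 10_000 sat/kWU the 25% bump dominates: max(2530, 12_500) = 12_500.
	assert_eq!(cmp::max(2530u32, 10_000u32 * 1250 / 1000), 12_500);
}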
2693 /// Get forwarding information for the counterparty.
2694 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
2695 self.counterparty_forwarding_info.clone()
2698 /// Returns an HTLCStats about pending inbound HTLCs
2699 fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
2701 let mut stats = HTLCStats {
2702 pending_htlcs: context.pending_inbound_htlcs.len() as u32,
2703 pending_htlcs_value_msat: 0,
2704 on_counterparty_tx_dust_exposure_msat: 0,
2705 on_holder_tx_dust_exposure_msat: 0,
2706 holding_cell_msat: 0,
2707 on_holder_tx_holding_cell_htlcs_count: 0,
2710 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2713 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
2714 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
2715 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
2717 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
2718 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
2719 for ref htlc in context.pending_inbound_htlcs.iter() {
2720 stats.pending_htlcs_value_msat += htlc.amount_msat;
2721 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
2722 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
2724 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
2725 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
2731 /// Returns an HTLCStats about pending outbound HTLCs, *including* pending adds in our holding cell.
2732 fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
2734 let mut stats = HTLCStats {
2735 pending_htlcs: context.pending_outbound_htlcs.len() as u32,
2736 pending_htlcs_value_msat: 0,
2737 on_counterparty_tx_dust_exposure_msat: 0,
2738 on_holder_tx_dust_exposure_msat: 0,
2739 holding_cell_msat: 0,
2740 on_holder_tx_holding_cell_htlcs_count: 0,
2743 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2746 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
2747 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
2748 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
2750 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
2751 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
2752 for ref htlc in context.pending_outbound_htlcs.iter() {
2753 stats.pending_htlcs_value_msat += htlc.amount_msat;
2754 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
2755 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
2757 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
2758 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
2762 for update in context.holding_cell_htlc_updates.iter() {
2763 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
2764 stats.pending_htlcs += 1;
2765 stats.pending_htlcs_value_msat += amount_msat;
2766 stats.holding_cell_msat += amount_msat;
2767 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
2768 stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
2770 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
2771 stats.on_holder_tx_dust_exposure_msat += amount_msat;
2773 stats.on_holder_tx_holding_cell_htlcs_count += 1;
2780 /// Returns information on all pending inbound HTLCs.
2781 pub fn get_pending_inbound_htlc_details(&self) -> Vec<InboundHTLCDetails> {
2782 let mut holding_cell_states = new_hash_map();
2783 for holding_cell_update in self.holding_cell_htlc_updates.iter() {
2784 match holding_cell_update {
2785 HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2786 holding_cell_states.insert(
2788 InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill,
2791 HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2792 holding_cell_states.insert(
2794 InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail,
2797 HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } => {
2798 holding_cell_states.insert(
2800 InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail,
2804 HTLCUpdateAwaitingACK::AddHTLC { .. } => {},
2807 let mut inbound_details = Vec::new();
2808 let htlc_success_dust_limit = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2811 let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
2812 dust_buffer_feerate * htlc_success_tx_weight(self.get_channel_type()) / 1000
2814 let holder_dust_limit_success_sat = htlc_success_dust_limit + self.holder_dust_limit_satoshis;
2815 for htlc in self.pending_inbound_htlcs.iter() {
2816 if let Some(state_details) = (&htlc.state).into() {
2817 inbound_details.push(InboundHTLCDetails{
2818 htlc_id: htlc.htlc_id,
2819 amount_msat: htlc.amount_msat,
2820 cltv_expiry: htlc.cltv_expiry,
2821 payment_hash: htlc.payment_hash,
2822 state: Some(holding_cell_states.remove(&htlc.htlc_id).unwrap_or(state_details)),
2823 is_dust: htlc.amount_msat / 1000 < holder_dust_limit_success_sat,
2830 /// Returns information on all pending outbound HTLCs.
2831 pub fn get_pending_outbound_htlc_details(&self) -> Vec<OutboundHTLCDetails> {
2832 let mut outbound_details = Vec::new();
2833 let htlc_timeout_dust_limit = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2836 let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
2837 dust_buffer_feerate * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
2839 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.holder_dust_limit_satoshis;
2840 for htlc in self.pending_outbound_htlcs.iter() {
2841 outbound_details.push(OutboundHTLCDetails{
2842 htlc_id: Some(htlc.htlc_id),
2843 amount_msat: htlc.amount_msat,
2844 cltv_expiry: htlc.cltv_expiry,
2845 payment_hash: htlc.payment_hash,
2846 skimmed_fee_msat: htlc.skimmed_fee_msat,
2847 state: Some((&htlc.state).into()),
2848 is_dust: htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat,
2851 for holding_cell_update in self.holding_cell_htlc_updates.iter() {
2852 if let HTLCUpdateAwaitingACK::AddHTLC {
2858 } = *holding_cell_update {
2859 outbound_details.push(OutboundHTLCDetails{
2861 amount_msat: amount_msat,
2862 cltv_expiry: cltv_expiry,
2863 payment_hash: payment_hash,
2864 skimmed_fee_msat: skimmed_fee_msat,
2865 state: Some(OutboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
2866 is_dust: amount_msat / 1000 < holder_dust_limit_timeout_sat,
2873 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
2874 /// Doesn't bother handling the
2875 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
2876 /// corner case properly.
2877 pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
2878 -> AvailableBalances
2879 where F::Target: FeeEstimator
2881 let context = &self;
2882 // Note that we have to handle overflow due to the above case.
2883 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
2884 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
2886 let mut balance_msat = context.value_to_self_msat;
2887 for ref htlc in context.pending_inbound_htlcs.iter() {
2888 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
2889 balance_msat += htlc.amount_msat;
2892 balance_msat -= outbound_stats.pending_htlcs_value_msat;
2894 let outbound_capacity_msat = context.value_to_self_msat
2895 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
2897 context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
2899 let mut available_capacity_msat = outbound_capacity_msat;
2901 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2902 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
2906 if context.is_outbound() {
2907 // We should mind channel commit tx fee when computing how much of the available capacity
2908 // can be used in the next htlc. Mirrors the logic in send_htlc.
2910 // The fee depends on whether the amount we will be sending is above dust or not,
2911 // and the answer will in turn change the amount itself, making it a circular dependency.
2913 // This complicates the computation around dust-values, up to the one-htlc-value.
2914 let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
2915 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2916 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
2919 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
2920 let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
2921 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
2922 let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
2923 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2924 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2925 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2928 // We will first subtract the fee as if we were above-dust. Then, if the resulting
2929 // value ends up being below dust, we have this fee available again. In that case,
2930 // match the value to right-below-dust.
2931 let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
2932 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
2933 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
2934 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
2935 debug_assert!(one_htlc_difference_msat != 0);
2936 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
2937 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
2938 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
2940 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
2943 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
2944 // sending a new HTLC won't reduce their balance below our reserve threshold.
2945 let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
2946 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2947 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
2950 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
2951 let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
2953 let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
2954 let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
2955 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
2957 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
2958 // If another HTLC's fee would reduce the remote's balance below the reserve limit
2959 // we've selected for them, we can only send dust HTLCs.
2960 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
2964 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
2966 // If we get close to our maximum dust exposure, we end up in a situation where we can send
2967 // between zero and the remaining dust exposure limit remaining OR above the dust limit.
2968 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
2969 // send above the dust limit (as the router can always overpay to meet the dust limit).
2970 let mut remaining_msat_below_dust_exposure_limit = None;
2971 let mut dust_exposure_dust_limit_msat = 0;
2972 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
2974 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2975 (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
2977 let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
2978 (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2979 context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2981 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
2982 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2983 remaining_msat_below_dust_exposure_limit =
2984 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
2985 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
2988 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
2989 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2990 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
2991 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
2992 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
2993 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
2996 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
2997 if available_capacity_msat < dust_exposure_dust_limit_msat {
2998 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
3000 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
3004 available_capacity_msat = cmp::min(available_capacity_msat,
3005 context.counterparty_max_htlc_value_in_flight_msat.saturating_sub(outbound_stats.pending_htlcs_value_msat));
3007 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
3008 available_capacity_msat = 0;
3012 inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
3013 - context.value_to_self_msat as i64
3014 - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
3015 - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
3017 outbound_capacity_msat,
3018 next_outbound_htlc_limit_msat: available_capacity_msat,
3019 next_outbound_htlc_minimum_msat,
3024 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
3025 let context = &self;
3026 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
3029 /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
3030 /// number of pending HTLCs that are on track to be in our next commitment tx.
3032 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
3033 /// `fee_spike_buffer_htlc` is `Some`.
3035 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
3036 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
3038 /// Dust HTLCs are excluded.
3039 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
3040 let context = &self;
3041 assert!(context.is_outbound());
3043 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3046 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
3047 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
3049 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
3050 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
3052 let mut addl_htlcs = 0;
3053 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
3055 HTLCInitiator::LocalOffered => {
3056 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
3060 HTLCInitiator::RemoteOffered => {
3061 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
3067 let mut included_htlcs = 0;
3068 for ref htlc in context.pending_inbound_htlcs.iter() {
3069 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
3072 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
3073 // transaction including this HTLC if it times out before they RAA.
3074 included_htlcs += 1;
3077 for ref htlc in context.pending_outbound_htlcs.iter() {
3078 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
3082 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
3083 OutboundHTLCState::Committed => included_htlcs += 1,
3084 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
3085 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
3086 // transaction won't be generated until they send us their next RAA, which will mean
3087 // dropping any HTLCs in this state.
3092 for htlc in context.holding_cell_htlc_updates.iter() {
3094 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
3095 if amount_msat / 1000 < real_dust_limit_timeout_sat {
3100 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
3101 // ack we're guaranteed to never include them in commitment txs anymore.
3105 let num_htlcs = included_htlcs + addl_htlcs;
3106 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
3107 #[cfg(any(test, fuzzing))]
3110 if fee_spike_buffer_htlc.is_some() {
3111 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
3113 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
3114 + context.holding_cell_htlc_updates.len();
3115 let commitment_tx_info = CommitmentTxInfoCached {
3117 total_pending_htlcs,
3118 next_holder_htlc_id: match htlc.origin {
3119 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
3120 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
3122 next_counterparty_htlc_id: match htlc.origin {
3123 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
3124 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
3126 feerate: context.feerate_per_kw,
3128 *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
3133 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
3134 /// pending HTLCs that are on track to be in their next commitment tx
3136 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
3137 /// `fee_spike_buffer_htlc` is `Some`.
3139 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
3140 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
3142 /// Dust HTLCs are excluded.
3143 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
3144 let context = &self;
3145 assert!(!context.is_outbound());
3147 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3150 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
3151 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
3153 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
3154 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
3156 let mut addl_htlcs = 0;
3157 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
3159 HTLCInitiator::LocalOffered => {
3160 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
3164 HTLCInitiator::RemoteOffered => {
3165 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
3171 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
3172 // non-dust inbound HTLCs are included (as all states imply they will be included) and only
3173 // committed outbound HTLCs, see below.
3174 let mut included_htlcs = 0;
3175 for ref htlc in context.pending_inbound_htlcs.iter() {
3176 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
3179 included_htlcs += 1;
3182 for ref htlc in context.pending_outbound_htlcs.iter() {
3183 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
3186 // We don't include outbound HTLCs that are merely awaiting a revoke_and_ack for their
3187 // removal, as their removal will be reflected before their next commitment tx is built.
3189 OutboundHTLCState::Committed => included_htlcs += 1,
3190 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
3191 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
3196 let num_htlcs = included_htlcs + addl_htlcs;
3197 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
3198 #[cfg(any(test, fuzzing))]
3201 if fee_spike_buffer_htlc.is_some() {
3202 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
3204 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
3205 let commitment_tx_info = CommitmentTxInfoCached {
3207 total_pending_htlcs,
3208 next_holder_htlc_id: match htlc.origin {
3209 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
3210 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
3212 next_counterparty_htlc_id: match htlc.origin {
3213 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
3214 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
3216 feerate: context.feerate_per_kw,
3218 *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
3223 fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O> where F: Fn() -> Option<O> {
3224 match self.channel_state {
3225 ChannelState::FundingNegotiated => f(),
3226 ChannelState::AwaitingChannelReady(flags) =>
3227 if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) ||
3228 flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into())
3238 /// Returns the transaction if there is a pending funding transaction that is yet to be broadcast.
3240 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
3241 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
3244 /// Returns the transaction ID if there is a pending funding transaction that is yet to be broadcast.
3246 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
3247 self.if_unbroadcasted_funding(||
3248 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
3252 /// Returns whether the channel is funded in a batch.
3253 pub fn is_batch_funding(&self) -> bool {
3254 self.is_batch_funding.is_some()
3257 /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be broadcast.
3259 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
3260 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
3263 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
3264 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
3265 /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
3266 /// Also returns the list of payment_hashes for channels which we can safely fail backwards
3267 /// immediately (others we will have to allow to time out).
3268 pub fn force_shutdown(&mut self, should_broadcast: bool, closure_reason: ClosureReason) -> ShutdownResult {
3269 // Note that we MUST only generate a monitor update that indicates force-closure - we're
3270 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
3271 // being fully configured in some cases. Thus, it's likely any monitor events we generate will
3272 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
3273 assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete));
3275 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
3276 // return them to fail the payment.
3277 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
3278 let counterparty_node_id = self.get_counterparty_node_id();
3279 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
3281 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
3282 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
3287 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
3288 // If we haven't yet exchanged funding signatures (ie channel_state < AwaitingChannelReady),
3289 // returning a channel monitor update here would imply a channel monitor update before
3290 // we even registered the channel monitor to begin with, which is invalid.
3291 // Thus, if we aren't actually at a point where we could conceivably broadcast the
3292 // funding transaction, don't return a funding txo (which prevents providing the
3293 // monitor update to the user, even if we return one).
3294 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
3295 if !self.channel_state.is_pre_funded_state() {
3296 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
3297 Some((self.get_counterparty_node_id(), funding_txo, self.channel_id(), ChannelMonitorUpdate {
3298 update_id: self.latest_monitor_update_id,
3299 counterparty_node_id: Some(self.counterparty_node_id),
3300 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
3301 channel_id: Some(self.channel_id()),
3305 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
3306 let unbroadcasted_funding_tx = self.unbroadcasted_funding();
3308 self.channel_state = ChannelState::ShutdownComplete;
3309 self.update_time_counter += 1;
3313 dropped_outbound_htlcs,
3314 unbroadcasted_batch_funding_txid,
3315 channel_id: self.channel_id,
3316 user_channel_id: self.user_id,
3317 channel_capacity_satoshis: self.channel_value_satoshis,
3318 counterparty_node_id: self.counterparty_node_id,
3319 unbroadcasted_funding_tx,
3320 channel_funding_txo: self.get_funding_txo(),
3324 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
3325 fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
3326 let counterparty_keys = self.build_remote_transaction_keys();
3327 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
3329 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
3330 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
3331 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
3332 &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
3334 match &self.holder_signer {
3335 // TODO (arik): move match into calling method for Taproot
3336 ChannelSignerType::Ecdsa(ecdsa) => {
3337 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
3338 .map(|(signature, _)| msgs::FundingSigned {
3339 channel_id: self.channel_id(),
3342 partial_signature_with_nonce: None,
3346 if funding_signed.is_none() {
3347 #[cfg(not(async_signing))] {
3348 panic!("Failed to get signature for funding_signed");
3350 #[cfg(async_signing)] {
3351 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
3352 self.signer_pending_funding = true;
3354 } else if self.signer_pending_funding {
3355 log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
3356 self.signer_pending_funding = false;
3359 // We sign the "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
3360 (counterparty_initial_commitment_tx, funding_signed)
3362 // TODO (taproot|arik)
3369 // Internal utility functions for channels
3371 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
3372 /// `channel_value_satoshis` in msat, set through
3373 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
3375 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
3377 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
3378 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
3379 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
3381 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
3384 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
3386 channel_value_satoshis * 10 * configured_percent
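// Worked example (test-only sketch): `* 10 * percent` is shorthand for
// `* 1000 (sat -> msat) * percent / 100`. For a hypothetical 1_000_000 sat channel
// at 10%, that is 100_000_000 msat, i.e. 100_000 sat.
#[cfg(test)]
fn max_in_flight_sketch() {
	let (channel_value_satoshis, configured_percent): (u64, u64) = (1_000_000, 10);
	assert_eq!(channel_value_satoshis * 10 * configured_percent, 100_000_000);
}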
3389 /// Returns a minimum channel reserve value the remote needs to maintain,
3390 /// required by us according to the configured or default
3391 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
3393 /// Guaranteed to return a value no larger than channel_value_satoshis
3395 /// This is used both for outbound and inbound channels and has lower bound
3396 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
3397 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
3398 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
3399 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
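// A test-only sketch of the clamping described above, using the helper itself (and
// assuming the 1_000 sat MIN_THEIR_CHAN_RESERVE_SATOSHIS floor): the proportional
// reserve is floored at the minimum and capped at the channel value.
#[cfg(test)]
fn holder_selected_reserve_sketch() {
	let mut config = UserConfig::default();
	config.channel_handshake_config.their_channel_reserve_proportional_millionths = 10_000; // 1%
	// 1% of 100_000 sat is exactly the 1_000 sat floor.
	assert_eq!(get_holder_selected_channel_reserve_satoshis(100_000, &config), 1_000);
	// For a tiny channel the result is capped at the channel value itself.
	assert_eq!(get_holder_selected_channel_reserve_satoshis(500, &config), 500);
}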
3402 /// This is for legacy reasons, present for forward-compatibility.
3403 /// LDK versions older than 0.0.104 don't know how to read/handle values other than the default
3404 /// from storage. Hence, we use this function to avoid persisting the default value of
3405 /// `holder_selected_channel_reserve_satoshis` into storage for channels.
3406 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
3407 let (q, _) = channel_value_satoshis.overflowing_div(100);
3408 cmp::min(channel_value_satoshis, cmp::max(q, 1000))
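// Test-only sketch of the legacy default: 1% of the channel value, floored at
// 1_000 sat and capped at the channel value.
#[cfg(test)]
fn legacy_default_reserve_sketch() {
	assert_eq!(get_legacy_default_holder_selected_channel_reserve_satoshis(1_000_000), 10_000);
	assert_eq!(get_legacy_default_holder_selected_channel_reserve_satoshis(50_000), 1_000);
}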
3411 /// Returns a minimum channel reserve value each party needs to maintain, fixed in the spec to a
3412 /// default of 1% of the total channel value.
3414 /// Guaranteed to return a value no larger than channel_value_satoshis
3416 /// This is used both for outbound and inbound channels and has lower bound
3417 /// of `dust_limit_satoshis`.
3418 #[cfg(dual_funding)]
3419 fn get_v2_channel_reserve_satoshis(channel_value_satoshis: u64, dust_limit_satoshis: u64) -> u64 {
3420 // Fixed at 1% of channel value by spec.
3421 let (q, _) = channel_value_satoshis.overflowing_div(100);
3422 cmp::min(channel_value_satoshis, cmp::max(q, dust_limit_satoshis))
3425 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
3426 // Note that num_htlcs should not include dust HTLCs.
3428 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
3429 feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
3432 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
3433 // Note that num_htlcs should not include dust HTLCs.
3434 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
3435 // Note that we need to divide before multiplying to round properly,
3436 // since the lowest denomination of bitcoin on-chain is the satoshi.
3437 (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
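// Test-only sketch relating the two fee helpers above: because commit_tx_fee_msat
// divides by 1000 (rounding down to whole satoshis) before re-scaling, it always
// equals commit_tx_fee_sat * 1000 for the same inputs.
#[cfg(test)]
fn commit_tx_fee_rounding_sketch(feerate_per_kw: u32, num_htlcs: usize, features: &ChannelTypeFeatures) {
	assert_eq!(
		commit_tx_fee_msat(feerate_per_kw, num_htlcs, features),
		commit_tx_fee_sat(feerate_per_kw, num_htlcs, features) * 1000);
}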
3440 /// Context for dual-funded channels.
3441 #[cfg(dual_funding)]
3442 pub(super) struct DualFundingChannelContext {
3443 /// The amount in satoshis we will be contributing to the channel.
3444 pub our_funding_satoshis: u64,
3445 /// The amount in satoshis our counterparty will be contributing to the channel.
3446 pub their_funding_satoshis: u64,
3447 /// The funding transaction locktime suggested by the initiator. If set by us, it is always set
3448 /// to the current block height to align incentives against fee-sniping.
3449 pub funding_tx_locktime: u32,
3450 /// The feerate set by the initiator to be used for the funding transaction.
3451 pub funding_feerate_sat_per_1000_weight: u32,
3454 // Holder designates channel data owned for the benefit of the user client.
3455 // Counterparty designates channel data owned by the other channel participant entity.
3456 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
3457 pub context: ChannelContext<SP>,
3458 #[cfg(dual_funding)]
3459 pub dual_funding_channel_context: Option<DualFundingChannelContext>,
3462 #[cfg(any(test, fuzzing))]
3463 struct CommitmentTxInfoCached {
3465 total_pending_htlcs: usize,
3466 next_holder_htlc_id: u64,
3467 next_counterparty_htlc_id: u64,
3471 /// Contents of a wire message that fails an HTLC backwards. Useful for [`Channel::fail_htlc`] to
3472 /// fail with either [`msgs::UpdateFailMalformedHTLC`] or [`msgs::UpdateFailHTLC`] as needed.
3473 trait FailHTLCContents {
3474 type Message: FailHTLCMessageName;
3475 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message;
3476 fn to_inbound_htlc_state(self) -> InboundHTLCState;
3477 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK;
3479 impl FailHTLCContents for msgs::OnionErrorPacket {
3480 type Message = msgs::UpdateFailHTLC;
3481 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
3482 msgs::UpdateFailHTLC { htlc_id, channel_id, reason: self }
3484 fn to_inbound_htlc_state(self) -> InboundHTLCState {
3485 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(self))
3487 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
3488 HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet: self }
3491 impl FailHTLCContents for ([u8; 32], u16) {
3492 type Message = msgs::UpdateFailMalformedHTLC;
3493 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
3494 msgs::UpdateFailMalformedHTLC {
3497 sha256_of_onion: self.0,
3498 failure_code: self.1
3501 fn to_inbound_htlc_state(self) -> InboundHTLCState {
3502 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(self))
3504 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
3505 HTLCUpdateAwaitingACK::FailMalformedHTLC {
3507 sha256_of_onion: self.0,
3508 failure_code: self.1
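// Test-only sketch of the abstraction above: the same generic fail path can turn its
// contents into either wire message type; the zeroed htlc_id and channel_id here are
// illustrative placeholders only.
#[cfg(test)]
fn fail_htlc_contents_sketch(sha256_of_onion: [u8; 32], failure_code: u16) -> msgs::UpdateFailMalformedHTLC {
	(sha256_of_onion, failure_code).to_message(0, ChannelId::from_bytes([0; 32]))
}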
3513 trait FailHTLCMessageName {
3514 fn name() -> &'static str;
3516 impl FailHTLCMessageName for msgs::UpdateFailHTLC {
3517 fn name() -> &'static str {
3521 impl FailHTLCMessageName for msgs::UpdateFailMalformedHTLC {
3522 fn name() -> &'static str {
3523 "update_fail_malformed_htlc"
3527 impl<SP: Deref> Channel<SP> where
3528 SP::Target: SignerProvider,
3529 <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
3531 fn check_remote_fee<F: Deref, L: Deref>(
3532 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
3533 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
3534 ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
3536 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
3537 ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
3539 ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
3541 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
3542 if feerate_per_kw < lower_limit {
3543 if let Some(cur_feerate) = cur_feerate_per_kw {
3544 if feerate_per_kw > cur_feerate {
3546 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
3547 cur_feerate, feerate_per_kw);
3551 return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
3557 fn get_closing_scriptpubkey(&self) -> ScriptBuf {
3558 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
3559 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
3560 // outside of those situations will fail.
3561 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
3565 fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
3570 1 + // script length (0)
3574 )*4 + // * 4 for non-witness parts
3575 2 + // witness marker and flag
3576 1 + // witness element count
3577 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
3578 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
3579 2*(1 + 71); // two signatures + sighash type flags
3580 if let Some(spk) = a_scriptpubkey {
3581 ret += ((8+1) + // output values and script length
3582 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
3584 if let Some(spk) = b_scriptpubkey {
3585 ret += ((8+1) + // output values and script length
3586 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
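// Worked example (test-only sketch): each present closing output adds
// ((8 value bytes + 1 script-length byte) + scriptpubkey length) * 4 weight units;
// a 22-byte P2WPKH scriptpubkey thus adds (8 + 1 + 22) * 4 = 124 WU.
#[cfg(test)]
fn closing_output_weight_sketch() {
	let p2wpkh_spk_len: u64 = 22;
	assert_eq!(((8 + 1) + p2wpkh_spk_len) * 4, 124);
}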
3592 fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
3593 assert!(self.context.pending_inbound_htlcs.is_empty());
3594 assert!(self.context.pending_outbound_htlcs.is_empty());
3595 assert!(self.context.pending_update_fee.is_none());
3597 let mut total_fee_satoshis = proposed_total_fee_satoshis;
3598 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
3599 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
3601 if value_to_holder < 0 {
3602 assert!(self.context.is_outbound());
3603 total_fee_satoshis += (-value_to_holder) as u64;
3604 } else if value_to_counterparty < 0 {
3605 assert!(!self.context.is_outbound());
3606 total_fee_satoshis += (-value_to_counterparty) as u64;
3609 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
3610 value_to_counterparty = 0;
3613 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
3614 value_to_holder = 0;
3617 assert!(self.context.shutdown_scriptpubkey.is_some());
3618 let holder_shutdown_script = self.get_closing_scriptpubkey();
3619 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
3620 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
3622 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
3623 (closing_transaction, total_fee_satoshis)
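// Numeric sketch (test-only, illustrative figures) of the split computed above: the
// funder's side absorbs the closing fee before any dust trimming is applied.
#[cfg(test)]
fn closing_value_split_sketch() {
	let (channel_value_sat, value_to_self_sat, fee_sat) = (100_000i64, 60_000i64, 500i64);
	let is_outbound = true; // we funded the channel, so we pay the closing fee
	let to_holder = value_to_self_sat - if is_outbound { fee_sat } else { 0 };
	let to_counterparty = (channel_value_sat - value_to_self_sat) - if is_outbound { 0 } else { fee_sat };
	assert_eq!((to_holder, to_counterparty), (59_500, 40_000));
}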
3626 fn funding_outpoint(&self) -> OutPoint {
3627 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
3630 /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`] entirely.
3633 /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
3634 /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
3636 /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is disconnected).
3638 pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
3639 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
3640 where L::Target: Logger {
3641 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
3642 // (see equivalent if condition there).
3643 assert!(!self.context.channel_state.can_generate_new_commitment());
3644 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
3645 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
3646 self.context.latest_monitor_update_id = mon_update_id;
3647 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
3648 assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
3652 fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
3653 // Either ChannelReady got set (which means it won't be unset) or there is no way any
3654 // caller thought we could have something claimed (cause we wouldn't have accepted an
3655 // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
3657 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3658 panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
3661 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
3662 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
3663 // these, but for now we just have to treat them as normal.
3665 let mut pending_idx = core::usize::MAX;
3666 let mut htlc_value_msat = 0;
3667 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
3668 if htlc.htlc_id == htlc_id_arg {
3669 debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
3670 log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
3671 htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
3673 InboundHTLCState::Committed => {},
3674 InboundHTLCState::LocalRemoved(ref reason) => {
3675 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3677 log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
3678 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
3680 return UpdateFulfillFetch::DuplicateClaim {};
3683 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
3684 // Don't return in release mode here so that we can update channel_monitor
3688 htlc_value_msat = htlc.amount_msat;
3692 if pending_idx == core::usize::MAX {
3693 #[cfg(any(test, fuzzing))]
3694 // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
3695 // this is simply a duplicate claim, not previously failed and we lost funds.
3696 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
3697 return UpdateFulfillFetch::DuplicateClaim {};
3700 // Now update local state:
3702 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
3703 // can claim it even if the channel hits the chain before we see their next commitment.
3704 self.context.latest_monitor_update_id += 1;
3705 let monitor_update = ChannelMonitorUpdate {
3706 update_id: self.context.latest_monitor_update_id,
3707 counterparty_node_id: Some(self.context.counterparty_node_id),
3708 updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
3709 payment_preimage: payment_preimage_arg.clone(),
3711 channel_id: Some(self.context.channel_id()),
3714 if !self.context.channel_state.can_generate_new_commitment() {
3715 // Note that this condition is the same as the assertion in
3716 // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
3717 // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
3718 // do not get into this branch.
3719 for pending_update in self.context.holding_cell_htlc_updates.iter() {
3720 match pending_update {
3721 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
3722 if htlc_id_arg == htlc_id {
3723 // Make sure we don't leave latest_monitor_update_id incremented here:
3724 self.context.latest_monitor_update_id -= 1;
3725 #[cfg(any(test, fuzzing))]
3726 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
3727 return UpdateFulfillFetch::DuplicateClaim {};
3730 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
3731 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
3733 if htlc_id_arg == htlc_id {
3734 log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
3735 // TODO: We may actually be able to switch to a fulfill here, though it's
3736 // rare enough it may not be worth the complexity burden.
3737 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
3738 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
3744 log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32());
3745 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
3746 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
3748 #[cfg(any(test, fuzzing))]
3749 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
3750 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
3752 #[cfg(any(test, fuzzing))]
3753 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
3756 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
3757 if let InboundHTLCState::Committed = htlc.state {
3759 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
3760 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
3762 log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
3763 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
3766 UpdateFulfillFetch::NewClaim {
3769 msg: Some(msgs::UpdateFulfillHTLC {
3770 channel_id: self.context.channel_id(),
3771 htlc_id: htlc_id_arg,
3772 payment_preimage: payment_preimage_arg,
	pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
		let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
		match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
			UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
				// Even if we aren't supposed to let new monitor updates with commitment state
				// updates run, we still need to push the preimage ChannelMonitorUpdateStep no
				// matter what. Sadly, to push a new monitor update which takes effect before
				// others already queued, we have to insert it into the pending queue and update
				// the update_ids of all the following monitors.
				if release_cs_monitor && msg.is_some() {
					let mut additional_update = self.build_commitment_no_status_check(logger);
					// build_commitment_no_status_check may bump latest_monitor_id but we want them
					// to be strictly increasing by one, so decrement it here.
					self.context.latest_monitor_update_id = monitor_update.update_id;
					monitor_update.updates.append(&mut additional_update.updates);
				} else {
					let new_mon_id = self.context.blocked_monitor_updates.get(0)
						.map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
					monitor_update.update_id = new_mon_id;
					for held_update in self.context.blocked_monitor_updates.iter_mut() {
						held_update.update.update_id += 1;
					}
					if msg.is_some() {
						debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
						let update = self.build_commitment_no_status_check(logger);
						self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
							update,
						});
					}
				}

				self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
				UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
			},
			UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
		}
	}
	/// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
	/// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
	/// however, fail more than once as we wait for an upstream failure to be irrevocably committed
	/// before we fail backwards.
	///
	/// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
	/// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
	/// [`ChannelError::Ignore`].
	pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
	-> Result<(), ChannelError> where L::Target: Logger {
		self.fail_htlc(htlc_id_arg, err_packet, true, logger)
			.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
	}

	/// Used for failing back with [`msgs::UpdateFailMalformedHTLC`]. For now, this is used when we
	/// want to fail blinded HTLCs where we are not the intro node.
	///
	/// See [`Self::queue_fail_htlc`] for more info.
	pub fn queue_fail_malformed_htlc<L: Deref>(
		&mut self, htlc_id_arg: u64, failure_code: u16, sha256_of_onion: [u8; 32], logger: &L
	) -> Result<(), ChannelError> where L::Target: Logger {
		self.fail_htlc(htlc_id_arg, (sha256_of_onion, failure_code), true, logger)
			.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
	}
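	// A minimal usage sketch for the two queueing entry points above (illustration only: the
	// `chan`, `logger`, `htlc_id`, `err_packet` and `sha256_of_onion` bindings, as well as the
	// failure-code constant, are assumed and not defined in this file):
	//
	//   // Fail back a normal forwarded HTLC:
	//   chan.queue_fail_htlc(htlc_id, err_packet, &logger)?;
	//   // Fail back a blinded HTLC where we are not the intro node:
	//   chan.queue_fail_malformed_htlc(htlc_id, INVALID_ONION_BLINDING, sha256_of_onion, &logger)?;
	//
	// Both paths only place the failure in the holding cell; a later call to
	// `maybe_free_holding_cell_htlcs` actually generates and sends the update messages.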
	/// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
	/// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
	/// however, fail more than once as we wait for an upstream failure to be irrevocably committed
	/// before we fail backwards.
	///
	/// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
	/// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
	/// [`ChannelError::Ignore`].
	fn fail_htlc<L: Deref, E: FailHTLCContents + Clone>(
		&mut self, htlc_id_arg: u64, err_contents: E, mut force_holding_cell: bool,
		logger: &L
	) -> Result<Option<E::Message>, ChannelError> where L::Target: Logger {
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			panic!("Was asked to fail an HTLC when channel was not in an operational state");
		}

		// ChannelManager may generate duplicate claims/fails due to HTLC update events from
		// on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
		// these, but for now we just have to treat them as normal.
		let mut pending_idx = core::usize::MAX;
		for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
			if htlc.htlc_id == htlc_id_arg {
				match htlc.state {
					InboundHTLCState::Committed => {},
					InboundHTLCState::LocalRemoved(ref reason) => {
						if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
						} else {
							debug_assert!(false, "Tried to fail an HTLC that was already failed");
						}
						return Ok(None);
					},
					_ => {
						debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
						return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
					}
				}
				pending_idx = idx;
			}
		}
		if pending_idx == core::usize::MAX {
			#[cfg(any(test, fuzzing))]
			// If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
			// is simply a duplicate fail, not previously failed and we failed-back too early.
			debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
			return Ok(None);
		}

		if !self.context.channel_state.can_generate_new_commitment() {
			debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
			force_holding_cell = true;
		}

		// Now update local state:
		if force_holding_cell {
			for pending_update in self.context.holding_cell_htlc_updates.iter() {
				match pending_update {
					&HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
						if htlc_id_arg == htlc_id {
							#[cfg(any(test, fuzzing))]
							debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
							return Ok(None);
						}
					},
					&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
						&HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
					{
						if htlc_id_arg == htlc_id {
							debug_assert!(false, "Tried to fail an HTLC that was already failed");
							return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
						}
					},
					_ => {}
				}
			}
			log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
			self.context.holding_cell_htlc_updates.push(err_contents.to_htlc_update_awaiting_ack(htlc_id_arg));
			return Ok(None);
		}

		log_trace!(logger, "Failing HTLC ID {} back with {} message in channel {}.", htlc_id_arg,
			E::Message::name(), &self.context.channel_id());
		{
			let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
			htlc.state = err_contents.clone().to_inbound_htlc_state();
		}

		Ok(Some(err_contents.to_message(htlc_id_arg, self.context.channel_id())))
	}
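	// For reference, `fail_htlc` above is generic over `E: FailHTLCContents` and is only ever
	// instantiated in the two ways used by the queue_* wrappers (a summary of the mapping, not
	// new behavior):
	//
	//   msgs::OnionErrorPacket -> HTLCUpdateAwaitingACK::FailHTLC          -> msgs::UpdateFailHTLC
	//   ([u8; 32], u16)        -> HTLCUpdateAwaitingACK::FailMalformedHTLC -> msgs::UpdateFailMalformedHTLC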
	// Message handlers:

	/// Updates the state of the channel to indicate that all channels in the batch have received
	/// funding_signed and persisted their monitors.
	/// The funding transaction is consequently allowed to be broadcast, and the channel can be
	/// treated as a non-batch channel going forward.
	pub fn set_batch_ready(&mut self) {
		self.context.is_batch_funding = None;
		self.context.channel_state.clear_waiting_for_batch();
	}

	/// Unsets the existing funding information.
	///
	/// This must only be used if the channel has not yet completed funding and has not been used.
	///
	/// Further, the channel must be immediately shut down after this with a call to
	/// [`ChannelContext::force_shutdown`].
	pub fn unset_funding_info(&mut self, temporary_channel_id: ChannelId) {
		debug_assert!(matches!(
			self.context.channel_state, ChannelState::AwaitingChannelReady(_)
		));
		self.context.channel_transaction_parameters.funding_outpoint = None;
		self.context.channel_id = temporary_channel_id;
	}
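	// Sketch of the required call pattern for the method above (assumed bindings; the exact
	// `force_shutdown` arguments are elided as that method is defined elsewhere in this file):
	//
	//   chan.unset_funding_info(temporary_channel_id);
	//   // The channel is now unusable and must be torn down immediately:
	//   let _shutdown_result = chan.context.force_shutdown(/* ... */);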
	/// Handles a channel_ready message from our peer. If we've already sent our channel_ready
	/// and the channel is now usable (and public), this may generate an announcement_signatures to
	/// reply with.
	pub fn channel_ready<NS: Deref, L: Deref>(
		&mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
		user_config: &UserConfig, best_block: &BestBlock, logger: &L
	) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		if self.context.channel_state.is_peer_disconnected() {
			self.context.workaround_lnd_bug_4006 = Some(msg.clone());
			return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
		}

		if let Some(scid_alias) = msg.short_channel_id_alias {
			if Some(scid_alias) != self.context.short_channel_id {
				// The scid alias provided can be used to route payments *from* our counterparty,
				// i.e. can be used for inbound payments and provided in invoices, but is not used
				// when routing outbound payments.
				self.context.latest_inbound_scid_alias = Some(scid_alias);
			}
		}

		// Our channel_ready shouldn't have been sent if we are waiting for other channels in the
		// batch, but we can receive channel_ready messages.
		let mut check_reconnection = false;
		match &self.context.channel_state {
			ChannelState::AwaitingChannelReady(flags) => {
				let flags = flags.clone().clear(FundedStateFlags::ALL.into());
				debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
				if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
					// If we reconnected before sending our `channel_ready` they may still resend theirs.
					check_reconnection = true;
				} else if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
					self.context.channel_state.set_their_channel_ready();
				} else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
					self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
					self.context.update_time_counter += 1;
				} else {
					// We're in `WAITING_FOR_BATCH`, so we should wait until we're ready.
					debug_assert!(flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
				}
			},
			// If we reconnected before sending our `channel_ready` they may still resend theirs.
			ChannelState::ChannelReady(_) => check_reconnection = true,
			_ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
		}
		if check_reconnection {
			// They probably disconnected/reconnected and re-sent the channel_ready, which is
			// required, or they're sending a fresh SCID alias.
			let expected_point =
				if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
					// If they haven't ever sent an updated point, the point they send should match
					// the current one.
					self.context.counterparty_cur_commitment_point
				} else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
					// If we've advanced the commitment number once, the second commitment point is
					// at `counterparty_prev_commitment_point`, which is not yet revoked.
					debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
					self.context.counterparty_prev_commitment_point
				} else {
					// If they have sent updated points, channel_ready is always supposed to match
					// their "first" point, which we re-derive here.
					Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
						&self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
					).expect("We already advanced, so previous secret keys should have been validated already")))
				};
			if expected_point != Some(msg.next_per_commitment_point) {
				return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
			}
			return Ok(None);
		}

		self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
		self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);

		log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());

		Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
	}
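	// To summarize the reconnection check above (informational only):
	//
	//   counterparty commitment number          expected `next_per_commitment_point`
	//   -------------------------------------   --------------------------------------------
	//   INITIAL_COMMITMENT_NUMBER - 1 (no RAA)  counterparty_cur_commitment_point
	//   INITIAL_COMMITMENT_NUMBER - 2 (one RAA) counterparty_prev_commitment_point
	//   older (two or more RAAs)                point re-derived from the revoked secret at
	//                                           index INITIAL_COMMITMENT_NUMBER - 1
	//
	// Any other point in a retransmitted `channel_ready` is treated as a protocol violation and
	// force-closes the channel.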
	pub fn update_add_htlc<F, FE: Deref, L: Deref>(
		&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
		create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
	) -> Result<(), ChannelError>
	where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
		FE::Target: FeeEstimator, L::Target: Logger,
	{
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
		}
		// We can't accept HTLCs sent after we've sent a shutdown.
		if self.context.channel_state.is_local_shutdown_sent() {
			pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
		}
		// If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
		if self.context.channel_state.is_remote_shutdown_sent() {
			return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
		}
		if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
			return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
		}
		if msg.amount_msat == 0 {
			return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
		}
		if msg.amount_msat < self.context.holder_htlc_minimum_msat {
			return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
		}

		let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
		let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
		if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
			return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
		}
		if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
			return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
		}

		// Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
		// the reserve_satoshis we told them to always have as direct payment so that they lose
		// something if we punish them for broadcasting an old state).
		// Note that we don't really care about having a small/no to_remote output in our local
		// commitment transactions, as the purpose of the channel reserve is to ensure we can
		// punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
		// present in the next commitment transaction we send them (at least for fulfilled ones,
		// failed ones won't modify value_to_self).
		// Note that we will send HTLCs which another instance of rust-lightning would think
		// violate the reserve value if we do not do this (as we forget inbound HTLCs from the
		// Channel state once they will not be present in the next received commitment
		// transaction).
		let mut removed_outbound_total_msat = 0;
		for ref htlc in self.context.pending_outbound_htlcs.iter() {
			if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
				removed_outbound_total_msat += htlc.amount_msat;
			} else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
				removed_outbound_total_msat += htlc.amount_msat;
			}
		}
		let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
		let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			(0, 0)
		} else {
			let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
			(dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
				dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
		};
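		// Worked example of the dust limits above (illustrative numbers only): on a non-anchor
		// channel with a dust buffer feerate of 2530 sat/kW and timeout/success transaction
		// weights of 663 and 703, we get
		//   htlc_timeout_dust_limit = 2530 * 663 / 1000 = 1677 sats
		//   htlc_success_dust_limit = 2530 * 703 / 1000 = 1778 sats
		// so an HTLC below 1677 sats plus the counterparty's dust limit is dust on their
		// commitment transaction and counts toward `max_dust_htlc_exposure_msat` below.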
		let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
		if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
			let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
			if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
					on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		}

		let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
		if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
			let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
			if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
					on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		}

		let pending_value_to_self_msat =
			self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
		let pending_remote_value_msat =
			self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
		if pending_remote_value_msat < msg.amount_msat {
			return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
		}

		// Check that the remote can afford to pay for this HTLC on-chain at the current
		// feerate_per_kw, while maintaining their channel reserve (as required by the spec).
		{
			let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
				let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
				self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
			};
			let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
				ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
			} else {
				0
			};
			if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
				return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
			}
			if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
				return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
			}
		}
		let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
		} else {
			0
		};
		if !self.context.is_outbound() {
			// `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
			// the spec because the fee spike buffer requirement doesn't exist on the receiver's
			// side, only on the sender's. Note that with anchor outputs we are less sensitive to
			// fee spikes, so the buffer multiple below is only applied on non-anchor channels.
			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
			let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
			if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
				remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
			}
			if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
				// Note that if the pending_forward_status is not updated here, then it's because we're already failing
				// the HTLC, i.e. its status is already set to failing.
				log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		} else {
			// Check that they won't violate our local required channel reserve by adding this HTLC.
			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
			let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
			if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
				return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
			}
		}

		if self.context.next_counterparty_htlc_id != msg.htlc_id {
			return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
		}
		if msg.cltv_expiry >= 500000000 {
			return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
		}

		if self.context.channel_state.is_local_shutdown_sent() {
			if let PendingHTLCStatus::Forward(_) = pending_forward_status {
				panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
			}
		}

		// Now update local state:
		self.context.next_counterparty_htlc_id += 1;
		self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
			htlc_id: msg.htlc_id,
			amount_msat: msg.amount_msat,
			payment_hash: msg.payment_hash,
			cltv_expiry: msg.cltv_expiry,
			state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
		});
		Ok(())
	}
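	// For orientation, an inbound HTLC accepted above then walks the following states, each
	// transition driven by the commitment_signed/revoke_and_ack handlers in this file (summary
	// only, no new behavior):
	//
	//   RemoteAnnounced -> AwaitingRemoteRevokeToAnnounce -> AwaitingAnnouncedRemoteRevoke
	//     -> Committed -> LocalRemoved (on fulfill/fail) -> removed once the removal is
	//   irrevocably committed on both sides' commitment transactions.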
	/// Marks an outbound HTLC for which we have received an update_fail/fulfill/malformed
	/// message from our peer.
	#[inline]
	fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
		assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if htlc.htlc_id == htlc_id {
				let outcome = match check_preimage {
					None => fail_reason.into(),
					Some(payment_preimage) => {
						let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
						if payment_hash != htlc.payment_hash {
							return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
						}
						OutboundHTLCOutcome::Success(Some(payment_preimage))
					}
				};
				match htlc.state {
					OutboundHTLCState::LocalAnnounced(_) =>
						return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
					OutboundHTLCState::Committed => {
						htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
					},
					OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
						return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
				}
				return Ok(htlc);
			}
		}
		Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
	}

	pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64, Option<u64>), ChannelError> {
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
		}

		self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat, htlc.skimmed_fee_msat))
	}

	pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
		}

		self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
		Ok(())
	}

	pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
		}

		self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
		Ok(())
	}
	pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
		where L::Target: Logger
	{
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
		}
		if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
			return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
		}

		let funding_script = self.context.get_funding_redeemscript();

		let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);

		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
		let commitment_txid = {
			let trusted_tx = commitment_stats.tx.trust();
			let bitcoin_tx = trusted_tx.built_transaction();
			let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);

			log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
				log_bytes!(msg.signature.serialize_compact()[..]),
				log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
				log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
			if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
				return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
			}
			bitcoin_tx.txid
		};
		let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();

		// If our counterparty updated the channel fee in this commitment transaction, check that
		// they can actually afford the new fee now.
		let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
			update_state == FeeUpdateState::RemoteAnnounced
		} else { false };
		if update_fee {
			debug_assert!(!self.context.is_outbound());
			let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
			if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
				return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
			}
		}
		#[cfg(any(test, fuzzing))]
		{
			if self.context.is_outbound() {
				let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
				*self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
				if let Some(info) = projected_commit_tx_info {
					let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
						+ self.context.holding_cell_htlc_updates.len();
					if info.total_pending_htlcs == total_pending_htlcs
						&& info.next_holder_htlc_id == self.context.next_holder_htlc_id
						&& info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
						&& info.feerate == self.context.feerate_per_kw {
							assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
					}
				}
			}
		}

		if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
			return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
		}

		// Up to LDK 0.0.115, HTLC information was required to be duplicated in the
		// `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
		// in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
		// outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
		// backwards compatibility, we never use it in production. To provide test coverage, here,
		// we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
		#[allow(unused_assignments, unused_mut)]
		let mut separate_nondust_htlc_sources = false;
		#[cfg(all(feature = "std", any(test, fuzzing)))] {
			use core::hash::{BuildHasher, Hasher};
			// Get a random value using the only std API to do so - the DefaultHasher
			let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
			separate_nondust_htlc_sources = rand_val % 2 == 0;
		}
		let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
		let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
		for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
			if let Some(_) = htlc.transaction_output_index {
				let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
					self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
					&keys.broadcaster_delayed_payment_key, &keys.revocation_key);

				let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
				let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
				let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
				log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
					log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
					encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
				if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
					return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
				}
				if !separate_nondust_htlc_sources {
					htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
				}
			} else {
				htlcs_and_sigs.push((htlc, None, source_opt.take()));
			}
			if separate_nondust_htlc_sources {
				if let Some(source) = source_opt.take() {
					nondust_htlc_sources.push(source);
				}
			}
			debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
		}

		let holder_commitment_tx = HolderCommitmentTransaction::new(
			commitment_stats.tx,
			msg.signature,
			msg.htlc_signatures.clone(),
			&self.context.get_holder_pubkeys().funding_pubkey,
			self.context.counterparty_funding_pubkey()
		);

		self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
			.map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
		// Update state now that we've passed all the can-fail calls...
		let mut need_commitment = false;
		if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
			if *update_state == FeeUpdateState::RemoteAnnounced {
				*update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
				need_commitment = true;
			}
		}

		for htlc in self.context.pending_inbound_htlcs.iter_mut() {
			let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
				Some(forward_info.clone())
			} else { None };
			if let Some(forward_info) = new_forward {
				log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
					&htlc.payment_hash, &self.context.channel_id);
				htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
				need_commitment = true;
			}
		}

		let mut claimed_htlcs = Vec::new();
		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
				log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
					&htlc.payment_hash, &self.context.channel_id);
				// Grab the preimage, if it exists, instead of cloning
				let mut reason = OutboundHTLCOutcome::Success(None);
				mem::swap(outcome, &mut reason);
				if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
					// If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
					// upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
					// have a `Success(None)` reason. In this case we could forget some HTLC
					// claims, but such an upgrade is unlikely and including claimed HTLCs here
					// fixes a bug which the user was exposed to on 0.0.104 when they started the
					// claim process.
					claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
				}
				htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
				need_commitment = true;
			}
		}

		self.context.latest_monitor_update_id += 1;
		let mut monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			counterparty_node_id: Some(self.context.counterparty_node_id),
			updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
				commitment_tx: holder_commitment_tx,
				htlc_outputs: htlcs_and_sigs,
				claimed_htlcs,
				nondust_htlc_sources,
			}],
			channel_id: Some(self.context.channel_id()),
		};

		self.context.cur_holder_commitment_transaction_number -= 1;
		self.context.expecting_peer_commitment_signed = false;
		// Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
		// build_commitment_no_status_check() next which will reset this to RAAFirst.
		self.context.resend_order = RAACommitmentOrder::CommitmentFirst;

		if self.context.channel_state.is_monitor_update_in_progress() {
			// In case we initially failed monitor updating without requiring a response, we need
			// to make sure the RAA gets sent first.
			self.context.monitor_pending_revoke_and_ack = true;
			if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
				// If we were going to send a commitment_signed after the RAA, go ahead and do all
				// the corresponding HTLC status updates so that
				// get_last_commitment_update_for_send includes the right HTLCs.
				self.context.monitor_pending_commitment_signed = true;
				let mut additional_update = self.build_commitment_no_status_check(logger);
				// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
				// strictly increasing by one, so decrement it here.
				self.context.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);
			}
			log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
				&self.context.channel_id);
			return Ok(self.push_ret_blockable_mon_update(monitor_update));
		}

		let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
			// If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
			// we'll send one right away when we get the revoke_and_ack when we
			// free_holding_cell_htlcs().
			let mut additional_update = self.build_commitment_no_status_check(logger);
			// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
			// strictly increasing by one, so decrement it here.
			self.context.latest_monitor_update_id = monitor_update.update_id;
			monitor_update.updates.append(&mut additional_update.updates);
			true
		} else { false };

		log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
			&self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
		self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
		return Ok(self.push_ret_blockable_mon_update(monitor_update));
	}
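	// For reference, the handler above implements the receiving half of one BOLT-2 update round
	// (either peer may play "A"; summary only, no new behavior):
	//
	//   A -> B: update_add_htlc / update_fulfill_htlc / update_fail_htlc / update_fee
	//   A -> B: commitment_signed      <- handled above, by B
	//   B -> A: revoke_and_ack
	//   B -> A: commitment_signed      (only if B had state updates to commit, `need_commitment`)
	//   A -> B: revoke_and_ack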
	/// Public version of the below, checking relevant preconditions first.
	/// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
	/// returns `(None, Vec::new())`.
	pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
	where F::Target: FeeEstimator, L::Target: Logger
	{
		if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.channel_state.can_generate_new_commitment() {
			self.free_holding_cell_htlcs(fee_estimator, logger)
		} else { (None, Vec::new()) }
	}
	/// Frees any pending commitment updates in the holding cell, generating the relevant messages
	/// for our counterparty.
	fn free_holding_cell_htlcs<F: Deref, L: Deref>(
		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
	where F::Target: FeeEstimator, L::Target: Logger
	{
		assert!(!self.context.channel_state.is_monitor_update_in_progress());
		if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
			log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
				if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());

			let mut monitor_update = ChannelMonitorUpdate {
				update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
				counterparty_node_id: Some(self.context.counterparty_node_id),
				updates: Vec::new(),
				channel_id: Some(self.context.channel_id()),
			};

			let mut htlc_updates = Vec::new();
			mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
			let mut update_add_count = 0;
			let mut update_fulfill_count = 0;
			let mut update_fail_count = 0;
			let mut htlcs_to_fail = Vec::new();
			for htlc_update in htlc_updates.drain(..) {
				// Note that this *can* fail, though it should be due to rather-rare conditions on
				// fee races with adding too many outputs which push our total payments just over
				// the limit. In case it's less rare than anticipated, we may want to revisit
				// handling this case better and maybe fulfilling some of the HTLCs while attempting
				// to rebalance channels.
				let fail_htlc_res = match &htlc_update {
					&HTLCUpdateAwaitingACK::AddHTLC {
						amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
						skimmed_fee_msat, blinding_point, ..
					} => {
						match self.send_htlc(
							amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
							false, skimmed_fee_msat, blinding_point, fee_estimator, logger
						) {
							Ok(_) => update_add_count += 1,
							Err(e) => {
								match e {
									ChannelError::Ignore(ref msg) => {
										log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
										// If we fail to send here, then this HTLC should
										// be failed backwards. Failing to send here
										// indicates that this HTLC may keep being put back
										// into the holding cell without ever being
										// successfully forwarded/failed/fulfilled, causing
										// our counterparty to eventually close on us.
										htlcs_to_fail.push((source.clone(), *payment_hash));
									},
									_ => {
										panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
									},
								}
							}
						}
						None
					},
					&HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
						// If an HTLC claim was previously added to the holding cell (via
						// `get_update_fulfill_htlc`), then generating the claim message itself must
						// not fail - any in between attempts to claim the HTLC will have resulted
						// in it hitting the holding cell again and we cannot change the state of a
						// holding cell HTLC from fulfill to anything else.
						let mut additional_monitor_update =
							if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
								self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
							{ monitor_update } else { unreachable!() };
						update_fulfill_count += 1;
						monitor_update.updates.append(&mut additional_monitor_update.updates);
						None
					},
					&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
						Some(self.fail_htlc(htlc_id, err_packet.clone(), false, logger)
							.map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
					},
					&HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
						Some(self.fail_htlc(htlc_id, (sha256_of_onion, failure_code), false, logger)
							.map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
					},
				};
				if let Some(res) = fail_htlc_res {
					match res {
						Ok(fail_msg_opt) => {
							// If an HTLC failure was previously added to the holding cell (via
							// `queue_fail_{malformed_}htlc`) then generating the fail message itself must
							// not fail - we should never end up in a state where we double-fail
							// an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
							// for a full revocation before failing.
							debug_assert!(fail_msg_opt.is_some());
							update_fail_count += 1;
						},
						Err(ChannelError::Ignore(_)) => {},
						Err(_) => {
							panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
						},
					}
				}
			}
			if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
				return (None, htlcs_to_fail);
			}
			let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
				self.send_update_fee(feerate, false, fee_estimator, logger)
			} else {
				None
			};

			let mut additional_update = self.build_commitment_no_status_check(logger);
			// build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
			// but we want them to be strictly increasing by one, so reset it here.
			self.context.latest_monitor_update_id = monitor_update.update_id;
			monitor_update.updates.append(&mut additional_update.updates);

			log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
				&self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
				update_add_count, update_fulfill_count, update_fail_count);

			self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
			(self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
		} else {
			(None, Vec::new())
		}
	}
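	// Typical caller-side sketch for the pair of methods above (illustration only; the `chan`,
	// `fee_estimator` and `logger` bindings are assumed and not defined in this file):
	//
	//   let (monitor_update_opt, failed_htlcs) =
	//       chan.maybe_free_holding_cell_htlcs(&fee_estimator, &logger);
	//   // `failed_htlcs` must be failed back upstream, and any `monitor_update_opt` must be
	//   // persisted via the chain::Watch before the generated messages are released to the peer.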
	/// Handles receiving a remote's revoke_and_ack. Note that we may return a new
	/// commitment_signed message here in case we had pending outbound HTLCs to add which were
	/// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
	/// generating an appropriate error *after* the channel state has been updated based on the
	/// revoke_and_ack message.
	pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
	) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger,
	{
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
		}
		if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
			return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
		}

		let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());

		if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
			if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
				return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
			}
		}

		if !self.context.channel_state.is_awaiting_remote_revoke() {
			// Our counterparty seems to have burned their coins to us (by revoking a state when we
			// haven't given them a new commitment transaction to broadcast). We should probably
			// take advantage of this by updating our channel monitor, sending them an error, and
			// waiting for them to broadcast their latest (now-revoked) claim. But, that would be a
			// lot of work, and there's some chance this is all a misunderstanding anyway.
			// We have to do *something*, though, since our signer may get mad at us for otherwise
			// jumping a remote commitment number, so best to just force-close and move on.
			return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
		}

		#[cfg(any(test, fuzzing))]
		{
			*self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
			*self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
		}

		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				ecdsa.validate_counterparty_revocation(
					self.context.cur_counterparty_commitment_transaction_number + 1,
					&secret
				).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
			},
			// TODO (taproot|arik)
			#[cfg(taproot)]
			_ => todo!()
		};

		self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
			.map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
		self.context.latest_monitor_update_id += 1;
		let mut monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			counterparty_node_id: Some(self.context.counterparty_node_id),
			updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
				idx: self.context.cur_counterparty_commitment_transaction_number + 1,
				secret: msg.per_commitment_secret,
			}],
			channel_id: Some(self.context.channel_id()),
		};

		// Update state now that we've passed all the can-fail calls...
		// (note that we may still fail to generate the new commitment_signed message, but that's
		// OK, we step the channel here and *then* if the new generation fails we can fail the
		// channel based on that, but stepping stuff here should be safe either way.)
		self.context.channel_state.clear_awaiting_remote_revoke();
		self.context.sent_message_awaiting_response = None;
		self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
		self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
		self.context.cur_counterparty_commitment_transaction_number -= 1;

		if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
			self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
		}

		log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
		let mut to_forward_infos = Vec::new();
		let mut revoked_htlcs = Vec::new();
		let mut finalized_claimed_htlcs = Vec::new();
		let mut update_fail_htlcs = Vec::new();
		let mut update_fail_malformed_htlcs = Vec::new();
		let mut require_commitment = false;
		let mut value_to_self_msat_diff: i64 = 0;

		{
			// Take references explicitly so that we can hold multiple references to self.context.
			let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
			let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
			let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;

			// We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
			pending_inbound_htlcs.retain(|htlc| {
				if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
					log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
					if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
						value_to_self_msat_diff += htlc.amount_msat as i64;
					}
					*expecting_peer_commitment_signed = true;
					false
				} else { true }
			});
			pending_outbound_htlcs.retain(|htlc| {
				if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
					log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
					if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
						revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
					} else {
						finalized_claimed_htlcs.push(htlc.source.clone());
						// They fulfilled, so we sent them money
						value_to_self_msat_diff -= htlc.amount_msat as i64;
					}
					false
				} else { true }
			});
			for htlc in pending_inbound_htlcs.iter_mut() {
				let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
					true
				} else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
					true
				} else { false };
				if swap {
					let mut state = InboundHTLCState::Committed;
					mem::swap(&mut state, &mut htlc.state);

					if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
						log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
						htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
						require_commitment = true;
					} else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
						match forward_info {
							PendingHTLCStatus::Fail(fail_msg) => {
								log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
								require_commitment = true;
								match fail_msg {
									HTLCFailureMsg::Relay(msg) => {
										htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
										update_fail_htlcs.push(msg)
									},
									HTLCFailureMsg::Malformed(msg) => {
										htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
										update_fail_malformed_htlcs.push(msg)
									},
								}
							},
							PendingHTLCStatus::Forward(forward_info) => {
								log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
								to_forward_infos.push((forward_info, htlc.htlc_id));
								htlc.state = InboundHTLCState::Committed;
							}
						}
					}
				}
			}
			for htlc in pending_outbound_htlcs.iter_mut() {
				if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
					log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
					htlc.state = OutboundHTLCState::Committed;
					*expecting_peer_commitment_signed = true;
				}
				if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
					log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
					// Grab the preimage, if it exists, instead of cloning
					let mut reason = OutboundHTLCOutcome::Success(None);
					mem::swap(outcome, &mut reason);
					htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
					require_commitment = true;
				}
			}
		}
		self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;

		if let Some((feerate, update_state)) = self.context.pending_update_fee {
			match update_state {
				FeeUpdateState::Outbound => {
					debug_assert!(self.context.is_outbound());
					log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
					self.context.feerate_per_kw = feerate;
					self.context.pending_update_fee = None;
					self.context.expecting_peer_commitment_signed = true;
				},
				FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
				FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
					debug_assert!(!self.context.is_outbound());
					log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
					require_commitment = true;
					self.context.feerate_per_kw = feerate;
					self.context.pending_update_fee = None;
				},
			}
		}
		let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
		let release_state_str =
			if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
		macro_rules! return_with_htlcs_to_fail {
			($htlcs_to_fail: expr) => {
				if !release_monitor {
					self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
						update: monitor_update,
					});
					return Ok(($htlcs_to_fail, None));
				} else {
					return Ok(($htlcs_to_fail, Some(monitor_update)));
				}
			}
		}

		if self.context.channel_state.is_monitor_update_in_progress() {
			// We can't actually generate a new commitment transaction (incl by freeing holding
			// cells) while we can't update the monitor, so we just return what we have.
			if require_commitment {
				self.context.monitor_pending_commitment_signed = true;
				// When the monitor updating is restored we'll call
				// get_last_commitment_update_for_send(), which does not update state, but we're
				// definitely now awaiting a remote revoke before we can step forward any more, so
				// set it here.
				let mut additional_update = self.build_commitment_no_status_check(logger);
				// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
				// strictly increasing by one, so decrement it here.
				self.context.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);
			}
			self.context.monitor_pending_forwards.append(&mut to_forward_infos);
			self.context.monitor_pending_failures.append(&mut revoked_htlcs);
			self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
			log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
			return_with_htlcs_to_fail!(Vec::new());
		}

		match self.free_holding_cell_htlcs(fee_estimator, logger) {
			(Some(mut additional_update), htlcs_to_fail) => {
				// free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
				// strictly increasing by one, so decrement it here.
				self.context.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);

				log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
					&self.context.channel_id(), release_state_str);

				self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
				return_with_htlcs_to_fail!(htlcs_to_fail);
			},
			(None, htlcs_to_fail) => {
				if require_commitment {
					let mut additional_update = self.build_commitment_no_status_check(logger);

					// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
					// strictly increasing by one, so decrement it here.
					self.context.latest_monitor_update_id = monitor_update.update_id;
					monitor_update.updates.append(&mut additional_update.updates);

					log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
						&self.context.channel_id(),
						update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
						release_state_str);

					self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
					return_with_htlcs_to_fail!(htlcs_to_fail);
				} else {
					log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
						&self.context.channel_id(), release_state_str);

					self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
					return_with_htlcs_to_fail!(htlcs_to_fail);
				}
			}
		}
	}
4917 /// Queues up an outbound update fee by placing it in the holding cell. You should call
4918 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
4919 /// commitment update.
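///
/// A minimal, illustrative call-site sketch; the `channel`, `new_feerate_per_kw`,
/// `fee_estimator` and `logger` bindings are assumptions for the example, not part of
/// this API:
///
/// ```ignore
/// // Queue the fee update; no update_fee message is generated yet.
/// channel.queue_update_fee(new_feerate_per_kw, &fee_estimator, &logger);
/// // Later, draining the holding cell produces the update_fee plus the accompanying
/// // commitment_signed, assuming the channel can still afford the new feerate.
/// let _ = channel.maybe_free_holding_cell_htlcs(&fee_estimator, &logger);
/// ```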
4920 pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
4921 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4922 where F::Target: FeeEstimator, L::Target: Logger
4924 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
4925 assert!(msg_opt.is_none(), "We forced holding cell?");
4928 /// Adds a pending update to this channel. See the doc for send_htlc for
4929 /// further details on why the return value is an `Option`.
4930 /// If our balance is too low to cover the cost of the next commitment transaction at the
4931 /// new feerate, the update is cancelled.
4933 /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
4934 /// [`Channel`] if `force_holding_cell` is false.
4935 fn send_update_fee<F: Deref, L: Deref>(
4936 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
4937 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
4938 ) -> Option<msgs::UpdateFee>
4939 where F::Target: FeeEstimator, L::Target: Logger
4941 if !self.context.is_outbound() {
4942 panic!("Cannot send fee from inbound channel");
4944 if !self.context.is_usable() {
4945 panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
4947 if !self.context.is_live() {
4948 panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
4951 // Before proposing a feerate update, check that we can actually afford the new fee.
4952 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
4953 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
4954 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
4955 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
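// As a hedged illustration of the affordability check below (numbers made up, and
// taking CONCURRENT_INBOUND_HTLC_FEE_BUFFER to be 2 purely for the arithmetic): with
// 2 non-dust HTLCs on the commitment and 1 holding-cell HTLC, we price the commitment
// fee at the proposed feerate as though 2 + 1 + 2 = 5 HTLCs were present, then require
// our balance (net of holding-cell sends) to cover that buffered fee plus the
// counterparty-selected reserve.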
4956 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
4957 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
4958 if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
4959 //TODO: auto-close after a number of failures?
4960 log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
4964 // Note that we evaluate the pending-HTLC "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
4965 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4966 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4967 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4968 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4969 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
4972 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4973 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
4977 if self.context.channel_state.is_awaiting_remote_revoke() || self.context.channel_state.is_monitor_update_in_progress() {
4978 force_holding_cell = true;
4981 if force_holding_cell {
4982 self.context.holding_cell_update_fee = Some(feerate_per_kw);
4986 debug_assert!(self.context.pending_update_fee.is_none());
4987 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
4989 Some(msgs::UpdateFee {
4990 channel_id: self.context.channel_id,
4995 /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
4996 /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be resent.
4998 /// No further message handling calls may be made until a channel_reestablish dance has completed.
5000 /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
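///
/// A hedged usage sketch; the `channel` and `logger` bindings are illustrative:
///
/// ```ignore
/// // On peer disconnection:
/// if channel.remove_uncommitted_htlcs_and_mark_paused(&logger).is_err() {
///     // The channel never reached a usable state; force-shut it down.
/// }
/// ```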
5001 pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
5002 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
5003 if self.context.channel_state.is_pre_funded_state() {
5007 if self.context.channel_state.is_peer_disconnected() {
5008 // While the below code should be idempotent, it's simpler to just return early, as
5009 // redundant disconnect events can fire, though they should be rare.
5013 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
5014 self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
5017 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
5018 // will be retransmitted.
5019 self.context.last_sent_closing_fee = None;
5020 self.context.pending_counterparty_closing_signed = None;
5021 self.context.closing_fee_limits = None;
5023 let mut inbound_drop_count = 0;
5024 self.context.pending_inbound_htlcs.retain(|htlc| {
5026 InboundHTLCState::RemoteAnnounced(_) => {
5027 // They sent us an update_add_htlc but we never got the commitment_signed.
5028 // We'll tell them what commitment_signed we're expecting next and they'll drop
5029 // this HTLC accordingly
5030 inbound_drop_count += 1;
5033 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
5034 // We received a commitment_signed updating this HTLC and (at least hopefully)
5035 // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
5036 // in response to it yet, so don't touch it.
5039 InboundHTLCState::Committed => true,
5040 InboundHTLCState::LocalRemoved(_) => {
5041 // We (hopefully) sent a commitment_signed updating this HTLC (which we can
5042 // re-transmit if needed) and they may have even sent a revoke_and_ack back
5043 // (that we missed). Keep this around for now and if they tell us they missed
5044 // the commitment_signed we can re-transmit the update then.
5049 self.context.next_counterparty_htlc_id -= inbound_drop_count;
5051 if let Some((_, update_state)) = self.context.pending_update_fee {
5052 if update_state == FeeUpdateState::RemoteAnnounced {
5053 debug_assert!(!self.context.is_outbound());
5054 self.context.pending_update_fee = None;
5058 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
5059 if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
5060 // They sent us an update to remove this but haven't yet sent the corresponding
5061 // commitment_signed, we need to move it back to Committed and they can re-send
5062 // the update upon reconnection.
5063 htlc.state = OutboundHTLCState::Committed;
5067 self.context.sent_message_awaiting_response = None;
5069 self.context.channel_state.set_peer_disconnected();
5070 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
5074 /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
5075 /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
5076 /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
5077 /// update completes (potentially immediately).
5078 /// The messages which were generated with the monitor update must *not* have been sent to the
5079 /// remote end, and must instead have been dropped. They will be regenerated when
5080 /// [`Self::monitor_updating_restored`] is called.
5082 /// [`ChannelManager`]: super::channelmanager::ChannelManager
5083 /// [`chain::Watch`]: crate::chain::Watch
5084 /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
5085 fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
5086 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
5087 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
5088 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
5090 self.context.monitor_pending_revoke_and_ack |= resend_raa;
5091 self.context.monitor_pending_commitment_signed |= resend_commitment;
5092 self.context.monitor_pending_channel_ready |= resend_channel_ready;
5093 self.context.monitor_pending_forwards.append(&mut pending_forwards);
5094 self.context.monitor_pending_failures.append(&mut pending_fails);
5095 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
5096 self.context.channel_state.set_monitor_update_in_progress();
5099 /// Indicates that the latest ChannelMonitor update has been committed by the client
5100 /// successfully and we should restore normal operation. Returns messages which should be sent
5101 /// to the remote side.
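///
/// An illustrative caller-side sketch (the bindings here are assumptions for the
/// example):
///
/// ```ignore
/// // Once the pending ChannelMonitorUpdate has been durably persisted:
/// let updates = channel.monitor_updating_restored(
///     &logger, &node_signer, chain_hash, &user_config, best_block_height);
/// // Any raa/commitment_update in `updates` must be sent to the peer in
/// // `updates.order`.
/// ```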
5102 pub fn monitor_updating_restored<L: Deref, NS: Deref>(
5103 &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
5104 user_config: &UserConfig, best_block_height: u32
5105 ) -> MonitorRestoreUpdates
5108 NS::Target: NodeSigner
5110 assert!(self.context.channel_state.is_monitor_update_in_progress());
5111 self.context.channel_state.clear_monitor_update_in_progress();
5113 // If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to
5114 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
5115 // first received the funding_signed.
5116 let mut funding_broadcastable =
5117 if self.context.is_outbound() &&
5118 (matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
5119 matches!(self.context.channel_state, ChannelState::ChannelReady(_)))
5121 self.context.funding_transaction.take()
5123 // That said, if the funding transaction is already confirmed (ie we're active with a
5124 // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
5125 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.minimum_depth != Some(0) {
5126 funding_broadcastable = None;
5129 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
5130 // (and we assume the user never directly broadcasts the funding transaction and waits for
5131 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
5132 // * an inbound channel that failed to persist the monitor on funding_created and we got
5133 // the funding transaction confirmed before the monitor was persisted, or
5134 // * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
5135 let channel_ready = if self.context.monitor_pending_channel_ready {
5136 assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
5137 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
5138 self.context.monitor_pending_channel_ready = false;
5139 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
5140 Some(msgs::ChannelReady {
5141 channel_id: self.context.channel_id(),
5142 next_per_commitment_point,
5143 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5147 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
5149 let mut accepted_htlcs = Vec::new();
5150 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
5151 let mut failed_htlcs = Vec::new();
5152 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
5153 let mut finalized_claimed_htlcs = Vec::new();
5154 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
5156 if self.context.channel_state.is_peer_disconnected() {
5157 self.context.monitor_pending_revoke_and_ack = false;
5158 self.context.monitor_pending_commitment_signed = false;
5159 return MonitorRestoreUpdates {
5160 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
5161 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
5165 let raa = if self.context.monitor_pending_revoke_and_ack {
5166 Some(self.get_last_revoke_and_ack())
5168 let commitment_update = if self.context.monitor_pending_commitment_signed {
5169 self.get_last_commitment_update_for_send(logger).ok()
5171 if commitment_update.is_some() {
5172 self.mark_awaiting_response();
5175 self.context.monitor_pending_revoke_and_ack = false;
5176 self.context.monitor_pending_commitment_signed = false;
5177 let order = self.context.resend_order.clone();
5178 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
5179 &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
5180 if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
5181 match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
5182 MonitorRestoreUpdates {
5183 raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
5187 pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
5188 where F::Target: FeeEstimator, L::Target: Logger
5190 if self.context.is_outbound() {
5191 return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
5193 if self.context.channel_state.is_peer_disconnected() {
5194 return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
5196 Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
5198 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
5199 self.context.update_time_counter += 1;
5200 // Check that we won't be pushed over our dust exposure limit by the feerate increase.
5201 if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
5202 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
5203 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
5204 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
5205 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
5206 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
5207 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
5208 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
5209 msg.feerate_per_kw, holder_tx_dust_exposure)));
5211 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
5212 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
5213 msg.feerate_per_kw, counterparty_tx_dust_exposure)));
5219 /// Indicates that the signer may have some signatures for us, so we should retry if we're blocked.
5221 #[cfg(async_signing)]
5222 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
5223 let commitment_update = if self.context.signer_pending_commitment_update {
5224 self.get_last_commitment_update_for_send(logger).ok()
5226 let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
5227 self.context.get_funding_signed_msg(logger).1
5229 let channel_ready = if funding_signed.is_some() {
5230 self.check_get_channel_ready(0)
5233 log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
5234 if commitment_update.is_some() { "a" } else { "no" },
5235 if funding_signed.is_some() { "a" } else { "no" },
5236 if channel_ready.is_some() { "a" } else { "no" });
5238 SignerResumeUpdates {
5245 fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
5246 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
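// Holder commitment numbers count down from INITIAL_COMMITMENT_NUMBER: the point
// above is for the next commitment we expect the counterparty to sign, while the
// secret below (at `+ 2`) revokes the state prior to our current (`+ 1`) one.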
5247 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
5248 msgs::RevokeAndACK {
5249 channel_id: self.context.channel_id,
5250 per_commitment_secret,
5251 next_per_commitment_point,
5253 next_local_nonce: None,
5257 /// Gets the last commitment update for immediate sending to our peer.
5258 fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
5259 let mut update_add_htlcs = Vec::new();
5260 let mut update_fulfill_htlcs = Vec::new();
5261 let mut update_fail_htlcs = Vec::new();
5262 let mut update_fail_malformed_htlcs = Vec::new();
5264 for htlc in self.context.pending_outbound_htlcs.iter() {
5265 if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
5266 update_add_htlcs.push(msgs::UpdateAddHTLC {
5267 channel_id: self.context.channel_id(),
5268 htlc_id: htlc.htlc_id,
5269 amount_msat: htlc.amount_msat,
5270 payment_hash: htlc.payment_hash,
5271 cltv_expiry: htlc.cltv_expiry,
5272 onion_routing_packet: (**onion_packet).clone(),
5273 skimmed_fee_msat: htlc.skimmed_fee_msat,
5274 blinding_point: htlc.blinding_point,
5279 for htlc in self.context.pending_inbound_htlcs.iter() {
5280 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
5282 &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
5283 update_fail_htlcs.push(msgs::UpdateFailHTLC {
5284 channel_id: self.context.channel_id(),
5285 htlc_id: htlc.htlc_id,
5286 reason: err_packet.clone()
5289 &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
5290 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
5291 channel_id: self.context.channel_id(),
5292 htlc_id: htlc.htlc_id,
5293 sha256_of_onion: sha256_of_onion.clone(),
5294 failure_code: failure_code.clone(),
5297 &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
5298 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
5299 channel_id: self.context.channel_id(),
5300 htlc_id: htlc.htlc_id,
5301 payment_preimage: payment_preimage.clone(),
5308 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
5309 Some(msgs::UpdateFee {
5310 channel_id: self.context.channel_id(),
5311 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
5315 log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
5316 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
5317 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
5318 let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
5319 if self.context.signer_pending_commitment_update {
5320 log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
5321 self.context.signer_pending_commitment_update = false;
5325 #[cfg(not(async_signing))] {
5326 panic!("Failed to get signature for new commitment state");
5328 #[cfg(async_signing)] {
5329 if !self.context.signer_pending_commitment_update {
5330 log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
5331 self.context.signer_pending_commitment_update = true;
5336 Ok(msgs::CommitmentUpdate {
5337 update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
5342 /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
5343 pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
5344 if self.context.channel_state.is_local_shutdown_sent() {
5345 assert!(self.context.shutdown_scriptpubkey.is_some());
5346 Some(msgs::Shutdown {
5347 channel_id: self.context.channel_id,
5348 scriptpubkey: self.get_closing_scriptpubkey(),
5353 /// May panic if some calls other than message-handling calls (which will all Err immediately)
5354 /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
5356 /// Some links printed in log lines are included here to check them during build (when run with
5357 /// `cargo doc --document-private-items`):
5358 /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
5359 /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
5360 pub fn channel_reestablish<L: Deref, NS: Deref>(
5361 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
5362 chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
5363 ) -> Result<ReestablishResponses, ChannelError>
5366 NS::Target: NodeSigner
5368 if !self.context.channel_state.is_peer_disconnected() {
5369 // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
5370 // almost certainly indicates we are going to end up out-of-sync in some way, so we
5371 // just close here instead of trying to recover.
5372 return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
5375 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
5376 msg.next_local_commitment_number == 0 {
5377 return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
5380 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
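// `cur_holder_commitment_transaction_number` counts down from
// INITIAL_COMMITMENT_NUMBER, whereas the reestablish message counts commitments up
// from zero, hence the conversion above.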
5381 if msg.next_remote_commitment_number > 0 {
5382 let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
5383 let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
5384 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
5385 if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
5386 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
5388 if msg.next_remote_commitment_number > our_commitment_transaction {
5389 macro_rules! log_and_panic {
5390 ($err_msg: expr) => {
5391 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
5392 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
5395 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
5396 This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
5397 More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
5398 If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
5399 ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
5400 ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
5401 Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
5402 See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
5406 // Before we change the state of the channel, we check if the peer is sending a very old
5407 // commitment transaction number; if so, we send a warning message.
5408 if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
5409 return Err(ChannelError::Warn(format!(
5410 "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
5411 msg.next_remote_commitment_number,
5412 our_commitment_transaction
5416 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
5417 // remaining cases either succeed or ErrorMessage-fail).
5418 self.context.channel_state.clear_peer_disconnected();
5419 self.context.sent_message_awaiting_response = None;
5421 let shutdown_msg = self.get_outbound_shutdown();
5423 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
5425 if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
5426 // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
5427 if !self.context.channel_state.is_our_channel_ready() ||
5428 self.context.channel_state.is_monitor_update_in_progress() {
5429 if msg.next_remote_commitment_number != 0 {
5430 return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
5432 // Short circuit the whole handler as there is nothing we can resend them
5433 return Ok(ReestablishResponses {
5434 channel_ready: None,
5435 raa: None, commitment_update: None,
5436 order: RAACommitmentOrder::CommitmentFirst,
5437 shutdown_msg, announcement_sigs,
5441 // We have OurChannelReady set!
5442 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
5443 return Ok(ReestablishResponses {
5444 channel_ready: Some(msgs::ChannelReady {
5445 channel_id: self.context.channel_id(),
5446 next_per_commitment_point,
5447 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5449 raa: None, commitment_update: None,
5450 order: RAACommitmentOrder::CommitmentFirst,
5451 shutdown_msg, announcement_sigs,
5455 let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
5456 // Remote isn't waiting on any RevokeAndACK from us!
5457 // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
5459 } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
5460 if self.context.channel_state.is_monitor_update_in_progress() {
5461 self.context.monitor_pending_revoke_and_ack = true;
5464 Some(self.get_last_revoke_and_ack())
5467 debug_assert!(false, "All values should have been handled in the four cases above");
5468 return Err(ChannelError::Close(format!(
5469 "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
5470 msg.next_remote_commitment_number,
5471 our_commitment_transaction
5475 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
5476 // revoke_and_ack, not on sending commitment_signed, so we add one if we have
5477 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
5478 // the corresponding revoke_and_ack back yet.
5479 let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke();
5480 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
5481 self.mark_awaiting_response();
5483 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
5485 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
5486 // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
5487 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
5488 Some(msgs::ChannelReady {
5489 channel_id: self.context.channel_id(),
5490 next_per_commitment_point,
5491 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5495 if msg.next_local_commitment_number == next_counterparty_commitment_number {
5496 if required_revoke.is_some() {
5497 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
5499 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
5502 Ok(ReestablishResponses {
5503 channel_ready, shutdown_msg, announcement_sigs,
5504 raa: required_revoke,
5505 commitment_update: None,
5506 order: self.context.resend_order.clone(),
5508 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
5509 if required_revoke.is_some() {
5510 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
5512 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
5515 if self.context.channel_state.is_monitor_update_in_progress() {
5516 self.context.monitor_pending_commitment_signed = true;
5517 Ok(ReestablishResponses {
5518 channel_ready, shutdown_msg, announcement_sigs,
5519 commitment_update: None, raa: None,
5520 order: self.context.resend_order.clone(),
5523 Ok(ReestablishResponses {
5524 channel_ready, shutdown_msg, announcement_sigs,
5525 raa: required_revoke,
5526 commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
5527 order: self.context.resend_order.clone(),
5530 } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
5531 Err(ChannelError::Close(format!(
5532 "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
5533 msg.next_local_commitment_number,
5534 next_counterparty_commitment_number,
5537 Err(ChannelError::Close(format!(
5538 "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
5539 msg.next_local_commitment_number,
5540 next_counterparty_commitment_number,
5545 /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
5546 /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
5547 /// at which point they will be recalculated.
5548 fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
5550 where F::Target: FeeEstimator
5552 if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
5554 // Propose a range from our current Background feerate to our Normal feerate plus our
5555 // force_close_avoidance_max_fee_satoshis.
5556 // If we fail to come to consensus, we'll have to force-close.
5557 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
5558 // Use NonAnchorChannelFee because this should be an estimate for a channel close
5559 // that we don't expect to need fee bumping
5560 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
5561 let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
5563 // The spec requires that (when the channel does not have anchors) we only send absolute
5564 // channel fees no greater than the absolute channel fee on the current commitment
5565 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
5566 // a very good reason to apply such a limit in any case. We don't bother doing so, risking
5567 // some force-closure by old nodes, but we wanted to close the channel anyway.
5569 if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
5570 let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
5571 proposed_feerate = cmp::max(proposed_feerate, min_feerate);
5572 proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
5575 // Note that technically we could end up with a lower minimum fee if one side's balance is
5576 // below our dust limit, causing the output to disappear. We don't bother handling this
5577 // case, however, as this should only happen if a channel is closed before any (material)
5578 // payments have been made on it. This may cause slight fee overpayment and/or failure to
5579 // come to consensus with our counterparty on appropriate fees, however it should be a
5580 // relatively rare case. We can revisit this later, though note that in order to determine
5581 // if the funder's output is dust we have to know the absolute fee we're going to use.
5582 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
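// The absolute fee below is feerate (sat/kW) * weight / 1000; e.g. 253 sat/kW on a
// 600-weight closing transaction gives 253 * 600 / 1000 = 151 sat (integer division,
// illustrative numbers only).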
5583 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
5584 let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
5585 // We always add force_close_avoidance_max_fee_satoshis to our normal
5586 // feerate-calculated fee, but allow the max to be overridden if we're using a
5587 // target feerate-calculated fee.
5588 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
5589 proposed_max_feerate as u64 * tx_weight / 1000)
5591 self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
5594 self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
5595 self.context.closing_fee_limits.clone().unwrap()
5598 /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
5599 /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
5600 /// this point if we're the funder we should send the initial closing_signed, and in any case
5601 /// shutdown should complete within a reasonable timeframe.
5602 fn closing_negotiation_ready(&self) -> bool {
5603 self.context.closing_negotiation_ready()
5606 /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
5607 /// an Err if no progress is being made and the channel should be force-closed instead.
5608 /// Should be called on a one-minute timer.
5609 pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
5610 if self.closing_negotiation_ready() {
5611 if self.context.closing_signed_in_flight {
5612 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
5614 self.context.closing_signed_in_flight = true;
5620 pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
5621 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
5622 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
5623 where F::Target: FeeEstimator, L::Target: Logger
5625 // If we're waiting on a monitor persistence, that implies we're also waiting to send some
5626 // message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
5627 // initiate `closing_signed` negotiation until we're clear of all pending messages. Note
5628 // that closing_negotiation_ready checks this case (as well as a few others).
5629 if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
5630 return Ok((None, None, None));
5633 if !self.context.is_outbound() {
5634 if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
5635 return self.closing_signed(fee_estimator, &msg);
5637 return Ok((None, None, None));
5640 // If we're waiting on a counterparty `commitment_signed` to clear some updates from our
5641 // local commitment transaction, we can't yet initiate `closing_signed` negotiation.
5642 if self.context.expecting_peer_commitment_signed {
5643 return Ok((None, None, None));
5646 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
5648 assert!(self.context.shutdown_scriptpubkey.is_some());
5649 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
5650 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
5651 our_min_fee, our_max_fee, total_fee_satoshis);
5653 match &self.context.holder_signer {
5654 ChannelSignerType::Ecdsa(ecdsa) => {
5656 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
5657 .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
5659 self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
5660 Ok((Some(msgs::ClosingSigned {
5661 channel_id: self.context.channel_id,
5662 fee_satoshis: total_fee_satoshis,
5664 fee_range: Some(msgs::ClosingSignedFeeRange {
5665 min_fee_satoshis: our_min_fee,
5666 max_fee_satoshis: our_max_fee,
5670 // TODO (taproot|arik)
5676 // Marks a channel as waiting for a response from the counterparty. If it's not received
5677 // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] ticks after sending our own to them, then we'll attempt to disconnect them.
5679 fn mark_awaiting_response(&mut self) {
5680 self.context.sent_message_awaiting_response = Some(0);
5683 /// Determines whether we should disconnect the counterparty due to not receiving a response
5684 /// within our expected timeframe.
5686 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
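///
/// A minimal timer-driven sketch (identifiers illustrative):
///
/// ```ignore
/// if channel.should_disconnect_peer_awaiting_response() {
///     // Disconnect the peer; reconnecting will kick off the channel_reestablish
///     // dance and allow lost messages to be retransmitted.
/// }
/// ```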
5687 pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
5688 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
5691 // Don't disconnect when we're not waiting on a response.
5694 *ticks_elapsed += 1;
5695 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
5698 pub fn shutdown(
5699 &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
5700 ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
5702 if self.context.channel_state.is_peer_disconnected() {
5703 return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
5705 if self.context.channel_state.is_pre_funded_state() {
5706 // Spec says we should fail the connection, not the channel, but that's nonsense, there
5707 // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
5708 // can do that via error message without getting a connection fail anyway...
5709 return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
5711 for htlc in self.context.pending_inbound_htlcs.iter() {
5712 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
5713 return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
5716 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
5718 if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
5719 return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
5722 if self.context.counterparty_shutdown_scriptpubkey.is_some() {
5723 if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
5724 return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
5727 self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
5730 // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
5731 // immediately after the commitment dance, but we can send a Shutdown because we won't send
5732 // any further commitment updates after we set LocalShutdownSent.
5733 let send_shutdown = !self.context.channel_state.is_local_shutdown_sent();
5735 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
5738 assert!(send_shutdown);
5739 let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
5740 Ok(scriptpubkey) => scriptpubkey,
5741 Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
5743 if !shutdown_scriptpubkey.is_compatible(their_features) {
5744 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
5746 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
5751 // From here on out, we may not fail!
5753 self.context.channel_state.set_remote_shutdown_sent();
5754 self.context.update_time_counter += 1;
5756 let monitor_update = if update_shutdown_script {
5757 self.context.latest_monitor_update_id += 1;
5758 let monitor_update = ChannelMonitorUpdate {
5759 update_id: self.context.latest_monitor_update_id,
5760 counterparty_node_id: Some(self.context.counterparty_node_id),
5761 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
5762 scriptpubkey: self.get_closing_scriptpubkey(),
5764 channel_id: Some(self.context.channel_id()),
5766 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
5767 self.push_ret_blockable_mon_update(monitor_update)
5769 let shutdown = if send_shutdown {
5770 Some(msgs::Shutdown {
5771 channel_id: self.context.channel_id,
5772 scriptpubkey: self.get_closing_scriptpubkey(),
5776 // We can't send our shutdown until we've committed all of our pending HTLCs, but the
5777 // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
5778 // cell HTLCs and return them to fail the payment.
5779 self.context.holding_cell_update_fee = None;
5780 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
5781 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5783 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
5784 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
5791 self.context.channel_state.set_local_shutdown_sent();
5792 self.context.update_time_counter += 1;
5794 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
5797 fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
5798 let mut tx = closing_tx.trust().built_transaction().clone();
5800 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
5802 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
5803 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
5804 let mut holder_sig = sig.serialize_der().to_vec();
5805 holder_sig.push(EcdsaSighashType::All as u8);
5806 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
5807 cp_sig.push(EcdsaSighashType::All as u8);
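// BOLT 3 orders the two funding pubkeys lexicographically by their serializations in
// the funding redeemscript, so the signatures must be pushed in that same order for
// OP_CHECKMULTISIG to pair them correctly.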
5808 if funding_key[..] < counterparty_funding_key[..] {
5809 tx.input[0].witness.push(holder_sig);
5810 tx.input[0].witness.push(cp_sig);
5812 tx.input[0].witness.push(cp_sig);
5813 tx.input[0].witness.push(holder_sig);
5816 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
5820 pub fn closing_signed<F: Deref>(
5821 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
5822 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
5823 where F::Target: FeeEstimator
5825 if !self.context.channel_state.is_both_sides_shutdown() {
5826 return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
5828 if self.context.channel_state.is_peer_disconnected() {
5829 return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
5831 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
5832 return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
5834 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
5835 return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
5838 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
5839 return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
5842 if self.context.channel_state.is_monitor_update_in_progress() {
5843 self.context.pending_counterparty_closing_signed = Some(msg.clone());
5844 return Ok((None, None, None));
5847 let funding_redeemscript = self.context.get_funding_redeemscript();
5848 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
5849 if used_total_fee != msg.fee_satoshis {
5850 return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
5852 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
5854 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
5857 // The remote end may have decided to revoke their output due to inconsistent dust
5858 // limits, so check for that case by re-checking the signature here.
5859 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
5860 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
5861 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
5865 for outp in closing_tx.trust().built_transaction().output.iter() {
5866 if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
5867 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
5871 let closure_reason = if self.initiated_shutdown() {
5872 ClosureReason::LocallyInitiatedCooperativeClosure
5874 ClosureReason::CounterpartyInitiatedCooperativeClosure
5877 assert!(self.context.shutdown_scriptpubkey.is_some());
5878 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
5879 if last_fee == msg.fee_satoshis {
5880 let shutdown_result = ShutdownResult {
5882 monitor_update: None,
5883 dropped_outbound_htlcs: Vec::new(),
5884 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
5885 channel_id: self.context.channel_id,
5886 user_channel_id: self.context.user_id,
5887 channel_capacity_satoshis: self.context.channel_value_satoshis,
5888 counterparty_node_id: self.context.counterparty_node_id,
5889 unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
5890 channel_funding_txo: self.context.get_funding_txo(),
5892 let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
5893 self.context.channel_state = ChannelState::ShutdownComplete;
5894 self.context.update_time_counter += 1;
5895 return Ok((None, Some(tx), Some(shutdown_result)));
5899 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
5901 macro_rules! propose_fee {
5902 ($new_fee: expr) => {
5903 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
5904 (closing_tx, $new_fee)
5906 self.build_closing_transaction($new_fee, false)
5909 return match &self.context.holder_signer {
5910 ChannelSignerType::Ecdsa(ecdsa) => {
5912 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
5913 .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
5914 let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
5915 let shutdown_result = ShutdownResult {
5917 monitor_update: None,
5918 dropped_outbound_htlcs: Vec::new(),
5919 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
5920 channel_id: self.context.channel_id,
5921 user_channel_id: self.context.user_id,
5922 channel_capacity_satoshis: self.context.channel_value_satoshis,
5923 counterparty_node_id: self.context.counterparty_node_id,
5924 unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
5925 channel_funding_txo: self.context.get_funding_txo(),
5927 self.context.channel_state = ChannelState::ShutdownComplete;
5928 self.context.update_time_counter += 1;
5929 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
5930 (Some(tx), Some(shutdown_result))
5935 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
5936 Ok((Some(msgs::ClosingSigned {
5937 channel_id: self.context.channel_id,
5938 fee_satoshis: used_fee,
5940 fee_range: Some(msgs::ClosingSignedFeeRange {
5941 min_fee_satoshis: our_min_fee,
5942 max_fee_satoshis: our_max_fee,
5944 }), signed_tx, shutdown_result))
5946 // TODO (taproot|arik)
5953 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
5954 if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
5955 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
5957 if max_fee_satoshis < our_min_fee {
5958 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
5960 if min_fee_satoshis > our_max_fee {
5961 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
5964 if !self.context.is_outbound() {
5965 // They have to pay, so pick the highest fee in the overlapping range.
5966 // We should never set an upper bound aside from their full balance
5967 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
5968 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
5970 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
5971 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
5972 msg.fee_satoshis, our_min_fee, our_max_fee)));
5974 // The proposed fee is in our acceptable range, accept it and broadcast!
5975 propose_fee!(msg.fee_satoshis);
5978 // Old fee style negotiation. We don't bother to enforce whether they are complying
5979 // with the "making progress" requirements, we just comply and hope for the best.
5980 if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
5981 if msg.fee_satoshis > last_fee {
5982 if msg.fee_satoshis < our_max_fee {
5983 propose_fee!(msg.fee_satoshis);
5984 } else if last_fee < our_max_fee {
5985 propose_fee!(our_max_fee);
5987 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
5990 if msg.fee_satoshis > our_min_fee {
5991 propose_fee!(msg.fee_satoshis);
5992 } else if last_fee > our_min_fee {
5993 propose_fee!(our_min_fee);
5995 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
5999 if msg.fee_satoshis < our_min_fee {
6000 propose_fee!(our_min_fee);
6001 } else if msg.fee_satoshis > our_max_fee {
6002 propose_fee!(our_max_fee);
6004 propose_fee!(msg.fee_satoshis);
6010 fn internal_htlc_satisfies_config(
6011 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
6012 ) -> Result<(), (&'static str, u16)> {
6013 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
6014 .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
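// Worked example (made-up config values): with forwarding_fee_base_msat = 1_000 and
// forwarding_fee_proportional_millionths = 100, forwarding 1_000_000 msat costs
// 1_000_000 * 100 / 1_000_000 + 1_000 = 1_100 msat.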
6015 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
6016 (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
6018 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
6019 0x1000 | 12, // fee_insufficient
6022 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
6024 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
6025 0x1000 | 13, // incorrect_cltv_expiry
6031 /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
6032 /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
6033 /// unsuccessful, falls back to the previous one if one exists.
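///
/// # Example
///
/// A minimal sketch with hypothetical numbers (`chan` and `update_add_htlc` are
/// assumed for illustration, not part of this API):
///
/// ```ignore
/// // With forwarding_fee_base_msat = 1_000 and
/// // forwarding_fee_proportional_millionths = 100, forwarding 1_000_000 msat
/// // requires the inbound HTLC to carry at least
/// // 1_000_000 + 1_000 + (1_000_000 * 100) / 1_000_000 = 1_001_100 msat.
/// match chan.htlc_satisfies_config(&update_add_htlc, 1_000_000, outgoing_cltv_value) {
///     Ok(()) => { /* safe to forward */ },
///     Err((_msg, code)) => { /* fail backwards, e.g. code == 0x1000 | 12 */ },
/// }
/// ```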
6034 pub fn htlc_satisfies_config(
6035 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
6036 ) -> Result<(), (&'static str, u16)> {
6037 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
6039 if let Some(prev_config) = self.context.prev_config() {
6040 self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
6047 pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
6048 self.context.cur_holder_commitment_transaction_number + 1
6051 pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
6052 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 }
6055 pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
6056 self.context.cur_counterparty_commitment_transaction_number + 2
6060 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
6061 &self.context.holder_signer
6065 pub fn get_value_stat(&self) -> ChannelValueStat {
6067 value_to_self_msat: self.context.value_to_self_msat,
6068 channel_value_msat: self.context.channel_value_satoshis * 1000,
6069 channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
6070 pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
6071 pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
6072 holding_cell_outbound_amount_msat: {
6074 for h in self.context.holding_cell_htlc_updates.iter() {
6076 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
6084 counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
6085 counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
6089 /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
6090 /// Allowed in any state (including after shutdown)
6091 pub fn is_awaiting_monitor_update(&self) -> bool {
6092 self.context.channel_state.is_monitor_update_in_progress()
6095 /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
6096 pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
6097 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
6098 self.context.blocked_monitor_updates[0].update.update_id - 1
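// For example, if updates with ids [5, 6] are currently blocked, the latest
// released-and-in-flight id is 5 - 1 = 4.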
6101 /// Returns the next blocked monitor update, if one exists, and a bool which indicates a
6102 /// further blocked monitor update exists after the next.
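///
/// A minimal draining sketch (`chan` and `persist_update` are hypothetical):
///
/// ```ignore
/// while let Some((update, more_blocked)) = chan.unblock_next_blocked_monitor_update() {
///     persist_update(update);
///     if !more_blocked { break; }
/// }
/// ```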
6103 pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
6104 if self.context.blocked_monitor_updates.is_empty() { return None; }
6105 Some((self.context.blocked_monitor_updates.remove(0).update,
6106 !self.context.blocked_monitor_updates.is_empty()))
6109 /// Pushes a new monitor update into our monitor update queue, returning it if it should be
6110 /// immediately given to the user for persisting or `None` if it should be held as blocked.
6111 fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
6112 -> Option<ChannelMonitorUpdate> {
6113 let release_monitor = self.context.blocked_monitor_updates.is_empty();
6114 if !release_monitor {
6115 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
6124 pub fn blocked_monitor_updates_pending(&self) -> usize {
6125 self.context.blocked_monitor_updates.len()
6128 /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
6129 /// If the channel is outbound, this implies we have not yet broadcasted the funding
6130 /// transaction. If the channel is inbound, this implies simply that the channel has not
6132 pub fn is_awaiting_initial_mon_persist(&self) -> bool {
6133 if !self.is_awaiting_monitor_update() { return false; }
6135 self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
6136 if flags.clone().clear(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty()
6138 // If we're not a 0conf channel, we'll be waiting on a monitor update with only
6139 // AwaitingChannelReady set, though our peer could have sent their channel_ready.
6140 debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
6143 if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
6144 self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
6145 // If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
6146 // waiting for the initial monitor persistence. Thus, we check if our commitment
6147 // transaction numbers have both been iterated only exactly once (for the
6148 // funding_signed), and we're awaiting monitor update.
6150 // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
6151 // only way to get an awaiting-monitor-update state during initial funding is if the
6152 // initial monitor persistence is still pending).
6154 // Because deciding we're awaiting initial broadcast spuriously could result in
6155 // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
6156 // we hard-assert here, even in production builds.
6157 if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
6158 assert!(self.context.monitor_pending_channel_ready);
6159 assert_eq!(self.context.latest_monitor_update_id, 0);
6165 /// Returns true if our channel_ready has been sent
6166 pub fn is_our_channel_ready(&self) -> bool {
6167 matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) ||
6168 matches!(self.context.channel_state, ChannelState::ChannelReady(_))
6171 /// Returns true if our peer has either initiated or agreed to shut down the channel.
6172 pub fn received_shutdown(&self) -> bool {
6173 self.context.channel_state.is_remote_shutdown_sent()
6176 /// Returns true if we either initiated or agreed to shut down the channel.
6177 pub fn sent_shutdown(&self) -> bool {
6178 self.context.channel_state.is_local_shutdown_sent()
6181 /// Returns true if we initiated to shut down the channel.
6182 pub fn initiated_shutdown(&self) -> bool {
6183 self.context.local_initiated_shutdown.is_some()
6186 /// Returns true if this channel is fully shut down. True here implies that no further actions
6187 /// may/will be taken on this channel, and thus this object should be freed. Any future changes
6188 /// will be handled appropriately by the chain monitor.
6189 pub fn is_shutdown(&self) -> bool {
6190 matches!(self.context.channel_state, ChannelState::ShutdownComplete)
6193 pub fn channel_update_status(&self) -> ChannelUpdateStatus {
6194 self.context.channel_update_status
6197 pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
6198 self.context.update_time_counter += 1;
6199 self.context.channel_update_status = status;
6202 fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
6204 // * always when a new block/transactions are confirmed with the new height
6205 // * when funding is signed with a height of 0
6206 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
6210 let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
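// e.g. a funding transaction confirmed at height 100 has, at height 105,
// 105 - 100 + 1 = 6 confirmations.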
6211 if funding_tx_confirmations <= 0 {
6212 self.context.funding_tx_confirmation_height = 0;
6215 if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
6219 // If we're still pending the signature on a funding transaction, then we're not ready to send a
6220 // channel_ready yet.
6221 if self.context.signer_pending_funding {
6225 // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
6226 // channel_ready until the entire batch is ready.
6227 let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()).is_empty()) {
6228 self.context.channel_state.set_our_channel_ready();
6230 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
6231 self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
6232 self.context.update_time_counter += 1;
6234 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
6235 // We got a reorg but not enough to trigger a force close, just ignore.
6238 if self.context.funding_tx_confirmation_height != 0 &&
6239 self.context.channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new())
6241 // We should never see a funding transaction on-chain until we've received
6242 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
6243 // an inbound channel - before that we have no known funding TXID). The fuzzer,
6244 // however, may do this and we shouldn't treat it as a bug.
6245 #[cfg(not(fuzzing))]
6246 panic!("Started confirming a channel in a state pre-AwaitingChannelReady: {}.\n\
6247 Do NOT broadcast a funding transaction manually - let LDK do it for you!",
6248 self.context.channel_state.to_u32());
6250 // We got a reorg but not enough to trigger a force close, just ignore.
6254 if need_commitment_update {
6255 if !self.context.channel_state.is_monitor_update_in_progress() {
6256 if !self.context.channel_state.is_peer_disconnected() {
6257 let next_per_commitment_point =
6258 self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
6259 return Some(msgs::ChannelReady {
6260 channel_id: self.context.channel_id,
6261 next_per_commitment_point,
6262 short_channel_id_alias: Some(self.context.outbound_scid_alias),
6266 self.context.monitor_pending_channel_ready = true;
6272 /// When a transaction is confirmed, we check whether it is or spends the funding transaction.
6273 /// In the first case, we store the confirmation height and calculate the short channel id.
6274 /// In the second, we simply return an Err indicating we need to be force-closed now.
6275 pub fn transactions_confirmed<NS: Deref, L: Deref>(
6276 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
6277 chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
6278 ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
6280 NS::Target: NodeSigner,
6283 let mut msgs = (None, None);
6284 if let Some(funding_txo) = self.context.get_funding_txo() {
6285 for &(index_in_block, tx) in txdata.iter() {
6286 // Check if the transaction is the expected funding transaction, and if it is,
6287 // check that it pays the right amount to the right script.
6288 if self.context.funding_tx_confirmation_height == 0 {
6289 if tx.txid() == funding_txo.txid {
6290 let txo_idx = funding_txo.index as usize;
6291 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
6292 tx.output[txo_idx].value != self.context.channel_value_satoshis {
6293 if self.context.is_outbound() {
6294 // If we generated the funding transaction and it doesn't match what it
6295 // should, the client is really broken and we should just panic and
6296 // tell them off. That said, because hash collisions happen with high
6297 // probability in fuzzing mode, if we're fuzzing we just close the
6298 // channel and move on.
6299 #[cfg(not(fuzzing))]
6300 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
6302 self.context.update_time_counter += 1;
6303 let err_reason = "funding tx had wrong script/value or output index";
6304 return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
6306 if self.context.is_outbound() {
6307 if !tx.is_coin_base() {
6308 for input in tx.input.iter() {
6309 if input.witness.is_empty() {
6310 // We generated a malleable funding transaction, implying we've
6311 // just exposed ourselves to funds loss to our counterparty.
6312 #[cfg(not(fuzzing))]
6313 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
6318 self.context.funding_tx_confirmation_height = height;
6319 self.context.funding_tx_confirmed_in = Some(*block_hash);
6320 self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
6321 Ok(scid) => Some(scid),
6322 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
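// Per BOLT 7, the short_channel_id packs
//     (block_height << 40) | (tx_index_in_block << 16) | output_index,
// so e.g. height 500_000, tx index 1_026, output 1 yields 0x07A120_000402_0001.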
6325 // If this is a coinbase transaction and not a 0-conf channel
6326 // we should update our min_depth to 100 to handle coinbase maturity
6327 if tx.is_coin_base() &&
6328 self.context.minimum_depth.unwrap_or(0) > 0 &&
6329 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
6330 self.context.minimum_depth = Some(COINBASE_MATURITY);
6333 // If we allow 1-conf funding, we may need to check for channel_ready here and
6334 // send it immediately instead of waiting for a best_block_updated call (which
6335 // may have already happened for this block).
6336 if let Some(channel_ready) = self.check_get_channel_ready(height) {
6337 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
6338 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
6339 msgs = (Some(channel_ready), announcement_sigs);
6342 for inp in tx.input.iter() {
6343 if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
6344 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
6345 return Err(ClosureReason::CommitmentTxConfirmed);
6353 /// When a new block is connected, we check the height of the block against outbound holding
6354 /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
6355 /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
6356 /// handled by the ChannelMonitor.
6358 /// If we return Err, the channel may have been closed, at which point the standard
6359 /// requirements apply - no calls may be made except those explicitly stated to be allowed
6362 /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
6364 pub fn best_block_updated<NS: Deref, L: Deref>(
6365 &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
6366 node_signer: &NS, user_config: &UserConfig, logger: &L
6367 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
6369 NS::Target: NodeSigner,
6372 self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
6375 fn do_best_block_updated<NS: Deref, L: Deref>(
6376 &mut self, height: u32, highest_header_time: u32,
6377 chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
6378 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
6380 NS::Target: NodeSigner,
6383 let mut timed_out_htlcs = Vec::new();
6384 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
6385 // forward an HTLC when our counterparty should almost certainly just fail it for expiring
6387 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
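// For instance, with LATENCY_GRACE_PERIOD_BLOCKS of, say, 3 (illustrative value)
// at height 800_000, any held HTLC with cltv_expiry <= 800_003 is timed out below.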
6388 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
6390 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
6391 if *cltv_expiry <= unforwarded_htlc_cltv_limit {
6392 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
6400 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
6402 if let Some(channel_ready) = self.check_get_channel_ready(height) {
6403 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
6404 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
6406 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
6407 return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
6410 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
6411 self.context.channel_state.is_our_channel_ready() {
6412 let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
6413 if self.context.funding_tx_confirmation_height == 0 {
6414 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
6415 // zero if it has been reorged out, however in either case, our state flags
6416 // indicate we've already sent a channel_ready
6417 funding_tx_confirmations = 0;
6420 // If we've sent channel_ready (or have both sent and received channel_ready), and
6421 // the funding transaction has become unconfirmed,
6422 // close the channel and hope we can get the latest state on chain (because presumably
6423 // the funding transaction is at least still in the mempool of most nodes).
6425 // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
6426 // 0-conf channel, but not doing so may lead to the
6427 // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
6429 if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
6430 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
6431 self.context.minimum_depth.unwrap(), funding_tx_confirmations);
6432 return Err(ClosureReason::ProcessingError { err: err_reason });
6434 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
6435 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
6436 log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
6437 // If funding_tx_confirmed_in is unset, the channel must not be active
6438 assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new()));
6439 assert!(!self.context.channel_state.is_our_channel_ready());
6440 return Err(ClosureReason::FundingTimedOut);
6443 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
6444 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
6446 Ok((None, timed_out_htlcs, announcement_sigs))
6449 /// Indicates the funding transaction is no longer confirmed in the main chain. This may
6450 /// force-close the channel, but may also indicate a harmless reorganization of a block or two
6451 /// before the channel has reached channel_ready and we can just wait for more blocks.
6452 pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
6453 if self.context.funding_tx_confirmation_height != 0 {
6454 // We handle the funding disconnection by calling best_block_updated with a height one
6455 // below where our funding was connected, implying a reorg back to conf_height - 1.
6456 let reorg_height = self.context.funding_tx_confirmation_height - 1;
6457 // We use the time field to bump the current time we set on channel updates if it's
6458 // larger. If we don't know that time has moved forward, we can just set it to the last
6459 // time we saw and it will be ignored.
6460 let best_time = self.context.update_time_counter;
6461 match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) {
6462 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
6463 assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
6464 assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
6465 assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
6471 // We never learned about the funding confirmation anyway, just ignore
6476 // Methods to get unprompted messages to send to the remote end (or where we already returned
6477 // something in the handler for the message that prompted this message):
6479 /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
6480 /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
6481 /// directions). Should be used for both broadcasted announcements and in response to an
6482 /// AnnouncementSignatures message from the remote peer.
6484 /// Will only fail if we're not in a state where channel_announcement may be sent (including
6487 /// This will only return ChannelError::Ignore upon failure.
6489 /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
6490 fn get_channel_announcement<NS: Deref>(
6491 &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
6492 ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
6493 if !self.context.config.announced_channel {
6494 return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
6496 if !self.context.is_usable() {
6497 return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
6500 let short_channel_id = self.context.get_short_channel_id()
6501 .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
6502 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
6503 .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
6504 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
6505 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
6507 let msg = msgs::UnsignedChannelAnnouncement {
6508 features: channelmanager::provided_channel_features(&user_config),
6511 node_id_1: if were_node_one { node_id } else { counterparty_node_id },
6512 node_id_2: if were_node_one { counterparty_node_id } else { node_id },
6513 bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
6514 bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
6515 excess_data: Vec::new(),
6521 fn get_announcement_sigs<NS: Deref, L: Deref>(
6522 &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
6523 best_block_height: u32, logger: &L
6524 ) -> Option<msgs::AnnouncementSignatures>
6526 NS::Target: NodeSigner,
6529 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
6533 if !self.context.is_usable() {
6537 if self.context.channel_state.is_peer_disconnected() {
6538 log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
6542 if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
6546 log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
6547 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
6550 log_trace!(logger, "{:?}", e);
6554 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
6556 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
6561 match &self.context.holder_signer {
6562 ChannelSignerType::Ecdsa(ecdsa) => {
6563 let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
6565 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
6570 let short_channel_id = match self.context.get_short_channel_id() {
6572 None => return None,
6575 self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
6577 Some(msgs::AnnouncementSignatures {
6578 channel_id: self.context.channel_id(),
6580 node_signature: our_node_sig,
6581 bitcoin_signature: our_bitcoin_sig,
6584 // TODO (taproot|arik)
6590 /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
6592 fn sign_channel_announcement<NS: Deref>(
6593 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
6594 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
6595 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
6596 let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
6597 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
6598 let were_node_one = announcement.node_id_1 == our_node_key;
6600 let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
6601 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
6602 match &self.context.holder_signer {
6603 ChannelSignerType::Ecdsa(ecdsa) => {
6604 let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
6605 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
6606 Ok(msgs::ChannelAnnouncement {
6607 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
6608 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
6609 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
6610 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
6611 contents: announcement,
6614 // TODO (taproot|arik)
6619 Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
6623 /// Processes an incoming announcement_signatures message, providing a fully-signed
6624 /// channel_announcement message which we can broadcast and storing our counterparty's
6625 /// signatures for later reconstruction/rebroadcast of the channel_announcement.
6626 pub fn announcement_signatures<NS: Deref>(
6627 &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
6628 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
6629 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
6630 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
6632 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
6634 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
6635 return Err(ChannelError::Close(format!(
6636 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
6637 &announcement, self.context.get_counterparty_node_id())));
6639 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
6640 return Err(ChannelError::Close(format!(
6641 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
6642 &announcement, self.context.counterparty_funding_pubkey())));
6645 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
6646 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
6647 return Err(ChannelError::Ignore(
6648 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
6651 self.sign_channel_announcement(node_signer, announcement)
6654 /// Gets a signed channel_announcement for this channel, if we previously received an
6655 /// announcement_signatures from our counterparty.
6656 pub fn get_signed_channel_announcement<NS: Deref>(
6657 &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
6658 ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
6659 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
6662 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
6664 Err(_) => return None,
6666 match self.sign_channel_announcement(node_signer, announcement) {
6667 Ok(res) => Some(res),
6672 /// May panic if called on a channel that wasn't immediately-previously
6673 /// self.remove_uncommitted_htlcs_and_mark_paused()'d
6674 pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
6675 assert!(self.context.channel_state.is_peer_disconnected());
6676 assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
6677 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
6678 // current to_remote balances. However, it no longer has any use, and thus is now simply
6679 // set to a dummy (but valid, as required by the spec) public key.
6680 // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
6681 // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is
6682 // both a valid pubkey and valid under fuzzing mode's arbitrary validity criteria:
6683 let mut pk = [2; 33]; pk[1] = 0xff;
6684 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
6685 let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
6686 let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
6687 log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
6690 log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
6693 self.mark_awaiting_response();
6694 msgs::ChannelReestablish {
6695 channel_id: self.context.channel_id(),
6696 // The protocol has two different commitment number concepts - the "commitment
6697 // transaction number", which starts from 0 and counts up, and the "revocation key
6698 // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
6699 // commitment transaction numbers by the index which will be used to reveal the
6700 // revocation key for that commitment transaction, which means we have to convert them
6701 // to protocol-level commitment numbers here...
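// For example, if cur_holder_commitment_transaction_number is
// INITIAL_COMMITMENT_NUMBER - 3, next_local_commitment_number below evaluates to 3.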
6703 // next_local_commitment_number is the next commitment_signed number we expect to
6704 // receive (indicating if they need to resend one that we missed).
6705 next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
6706 // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
6707 // receive, however we track it by the next commitment number for a remote transaction
6708 // (which is one further, as they always revoke previous commitment transaction, not
6709 // the one we send) so we have to decrement by 1. Note that if
6710 // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
6711 // dropped this channel on disconnect as it hasn't yet reached AwaitingChannelReady so we can't
6713 next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
6714 your_last_per_commitment_secret: remote_last_secret,
6715 my_current_per_commitment_point: dummy_pubkey,
6716 // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
6717 // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
6718 // txid of that interactive transaction, else we MUST NOT set it.
6719 next_funding_txid: None,
6724 // Send stuff to our remote peers:
6726 /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
6727 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
6728 /// commitment update.
6730 /// `Err`s will only be [`ChannelError::Ignore`].
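///
/// A minimal sketch (all values hypothetical, error handling elided):
///
/// ```ignore
/// chan.queue_add_htlc(amt_msat, payment_hash, cltv_expiry, htlc_source,
///     onion_packet, None /* skimmed_fee_msat */, None /* blinding_point */,
///     &fee_estimator, &logger)?;
/// // Later, once a new commitment can be generated:
/// // chan.maybe_free_holding_cell_htlcs(...)
/// ```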
6731 pub fn queue_add_htlc<F: Deref, L: Deref>(
6732 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
6733 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
6734 blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
6735 ) -> Result<(), ChannelError>
6736 where F::Target: FeeEstimator, L::Target: Logger
6739 .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
6740 skimmed_fee_msat, blinding_point, fee_estimator, logger)
6741 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
6743 if let ChannelError::Ignore(_) = err { /* fine */ }
6744 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
6749 /// Adds a pending outbound HTLC to this channel. Note that you probably want
6750 /// [`Self::send_htlc_and_commit`] instead, as you'll usually want both messages at once.
6752 /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
6754 /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
6755 /// wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
6757 /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
6758 /// we may not yet have sent the previous commitment update messages and will need to
6759 /// regenerate them.
6761 /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
6762 /// on this [`Channel`] if `force_holding_cell` is false.
6764 /// `Err`s will only be [`ChannelError::Ignore`].
6765 fn send_htlc<F: Deref, L: Deref>(
6766 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
6767 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
6768 skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
6769 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
6770 ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
6771 where F::Target: FeeEstimator, L::Target: Logger
6773 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
6774 self.context.channel_state.is_local_shutdown_sent() ||
6775 self.context.channel_state.is_remote_shutdown_sent()
6777 return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
6779 let channel_total_msat = self.context.channel_value_satoshis * 1000;
6780 if amount_msat > channel_total_msat {
6781 return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
6784 if amount_msat == 0 {
6785 return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
6788 let available_balances = self.context.get_available_balances(fee_estimator);
6789 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
6790 return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
6791 available_balances.next_outbound_htlc_minimum_msat)));
6794 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
6795 return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
6796 available_balances.next_outbound_htlc_limit_msat)));
6799 if self.context.channel_state.is_peer_disconnected() {
6800 // Note that this should never really happen: being !is_live() on receipt of an
6801 // incoming HTLC for relay will result in us rejecting the HTLC, and we won't allow
6802 // the user to send directly into a !is_live() channel. However, if we
6803 // disconnected during the time the previous hop was doing the commitment dance we may
6804 // end up getting here after the forwarding delay. In any case, returning an
6805 // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
6806 return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
6809 let need_holding_cell = !self.context.channel_state.can_generate_new_commitment();
6810 log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
6811 payment_hash, amount_msat,
6812 if force_holding_cell { "into holding cell" }
6813 else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
6814 else { "to peer" });
6816 if need_holding_cell {
6817 force_holding_cell = true;
6820 // Now update local state:
6821 if force_holding_cell {
6822 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
6827 onion_routing_packet,
6834 self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
6835 htlc_id: self.context.next_holder_htlc_id,
6837 payment_hash: payment_hash.clone(),
6839 state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
6845 let res = msgs::UpdateAddHTLC {
6846 channel_id: self.context.channel_id,
6847 htlc_id: self.context.next_holder_htlc_id,
6851 onion_routing_packet,
6855 self.context.next_holder_htlc_id += 1;
6860 fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
6861 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
6862 // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
6863 // fail to generate this, we still are at least at a position where upgrading their status
6865 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
6866 let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
6867 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
6869 if let Some(state) = new_state {
6870 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
6874 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
6875 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
6876 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
6877 // Grab the preimage, if it exists, instead of cloning
6878 let mut reason = OutboundHTLCOutcome::Success(None);
6879 mem::swap(outcome, &mut reason);
6880 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
6883 if let Some((feerate, update_state)) = self.context.pending_update_fee {
6884 if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
6885 debug_assert!(!self.context.is_outbound());
6886 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
6887 self.context.feerate_per_kw = feerate;
6888 self.context.pending_update_fee = None;
6891 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
6893 let (mut htlcs_ref, counterparty_commitment_tx) =
6894 self.build_commitment_no_state_update(logger);
6895 let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
6896 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
6897 htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
6899 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
6900 self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
6903 self.context.latest_monitor_update_id += 1;
6904 let monitor_update = ChannelMonitorUpdate {
6905 update_id: self.context.latest_monitor_update_id,
6906 counterparty_node_id: Some(self.context.counterparty_node_id),
6907 updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
6908 commitment_txid: counterparty_commitment_txid,
6909 htlc_outputs: htlcs.clone(),
6910 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
6911 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
6912 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
6913 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
6914 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
6916 channel_id: Some(self.context.channel_id()),
6918 self.context.channel_state.set_awaiting_remote_revoke();
6922 fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
6923 -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
6924 where L::Target: Logger
6926 let counterparty_keys = self.context.build_remote_transaction_keys();
6927 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
6928 let counterparty_commitment_tx = commitment_stats.tx;
6930 #[cfg(any(test, fuzzing))]
6932 if !self.context.is_outbound() {
6933 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
6934 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
6935 if let Some(info) = projected_commit_tx_info {
6936 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
6937 if info.total_pending_htlcs == total_pending_htlcs
6938 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
6939 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
6940 && info.feerate == self.context.feerate_per_kw {
6941 let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
6942 assert_eq!(actual_fee, info.fee);
6948 (commitment_stats.htlcs_included, counterparty_commitment_tx)
6951 /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
6952 /// generation when we shouldn't change HTLC/channel state.
6953 fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
6954 // Get the fee tests from `build_commitment_no_state_update`
6955 #[cfg(any(test, fuzzing))]
6956 self.build_commitment_no_state_update(logger);
6958 let counterparty_keys = self.context.build_remote_transaction_keys();
6959 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
6960 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
6962 match &self.context.holder_signer {
6963 ChannelSignerType::Ecdsa(ecdsa) => {
6964 let (signature, htlc_signatures);
6967 let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
6968 for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
6972 let res = ecdsa.sign_counterparty_commitment(
6973 &commitment_stats.tx,
6974 commitment_stats.inbound_htlc_preimages,
6975 commitment_stats.outbound_htlc_preimages,
6976 &self.context.secp_ctx,
6977 ).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
6979 htlc_signatures = res.1;
6981 log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
6982 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
6983 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
6984 log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
6986 for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
6987 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
6988 encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
6989 encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
6990 log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
6991 log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
6995 Ok((msgs::CommitmentSigned {
6996 channel_id: self.context.channel_id,
7000 partial_signature_with_nonce: None,
7001 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
7003 // TODO (taproot|arik)
7009 /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
7010 /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
7012 /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update; see docs on
7013 /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
7014 pub fn send_htlc_and_commit<F: Deref, L: Deref>(
7015 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
7016 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
7017 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
7018 ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
7019 where F::Target: FeeEstimator, L::Target: Logger
7021 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
7022 onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
7023 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
7026 let monitor_update = self.build_commitment_no_status_check(logger);
7027 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
7028 Ok(self.push_ret_blockable_mon_update(monitor_update))
7034 /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
7036 pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
7037 let new_forwarding_info = Some(CounterpartyForwardingInfo {
7038 fee_base_msat: msg.contents.fee_base_msat,
7039 fee_proportional_millionths: msg.contents.fee_proportional_millionths,
7040 cltv_expiry_delta: msg.contents.cltv_expiry_delta
7042 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
7044 self.context.counterparty_forwarding_info = new_forwarding_info;
7050 /// Begins the shutdown process, getting a message for the remote peer and returning all
7051 /// holding cell HTLCs for payment failure.
7052 pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
7053 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
7054 -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
7056 for htlc in self.context.pending_outbound_htlcs.iter() {
7057 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
7058 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
7061 if self.context.channel_state.is_local_shutdown_sent() {
7062 return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
7064 else if self.context.channel_state.is_remote_shutdown_sent() {
7065 return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
7067 if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
7068 return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
7070 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
7071 if self.context.channel_state.is_peer_disconnected() || self.context.channel_state.is_monitor_update_in_progress() {
7072 return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
7075 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
7078 // use override shutdown script if provided
7079 let shutdown_scriptpubkey = match override_shutdown_script {
7080 Some(script) => script,
7082 // otherwise, use the shutdown scriptpubkey provided by the signer
7083 match signer_provider.get_shutdown_scriptpubkey() {
7084 Ok(scriptpubkey) => scriptpubkey,
7085 Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
7089 if !shutdown_scriptpubkey.is_compatible(their_features) {
7090 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
7092 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
7097 // From here on out, we may not fail!
7098 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
7099 self.context.channel_state.set_local_shutdown_sent();
7100 self.context.local_initiated_shutdown = Some(());
7101 self.context.update_time_counter += 1;
7103 let monitor_update = if update_shutdown_script {
7104 self.context.latest_monitor_update_id += 1;
7105 let monitor_update = ChannelMonitorUpdate {
7106 update_id: self.context.latest_monitor_update_id,
7107 counterparty_node_id: Some(self.context.counterparty_node_id),
7108 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
7109 scriptpubkey: self.get_closing_scriptpubkey(),
7111 channel_id: Some(self.context.channel_id()),
7113 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
7114 self.push_ret_blockable_mon_update(monitor_update)
7116 let shutdown = msgs::Shutdown {
7117 channel_id: self.context.channel_id,
7118 scriptpubkey: self.get_closing_scriptpubkey(),
7121 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
7122 // our shutdown until we've committed all of the pending changes.
7123 self.context.holding_cell_update_fee = None;
7124 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
7125 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
7127 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
7128 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
7135 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
7136 "we can't both complete shutdown and return a monitor update");
7138 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
7141 pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
7142 self.context.holding_cell_htlc_updates.iter()
7143 .flat_map(|htlc_update| {
7145 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
7146 => Some((source, payment_hash)),
7150 .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
7154 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
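///
/// A construction sketch (every argument value here is hypothetical):
///
/// ```ignore
/// let chan = OutboundV1Channel::new(&fee_estimator, &entropy_source, &signer_provider,
///     counterparty_node_id, &their_features, 1_000_000 /* sat */, 0 /* push_msat */,
///     user_id, &config, best_block_height, outbound_scid_alias, None)?;
/// ```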
7155 pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
7156 pub context: ChannelContext<SP>,
7157 pub unfunded_context: UnfundedChannelContext,
7160 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
7161 pub fn new<ES: Deref, F: Deref>(
7162 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
7163 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
7164 outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
7165 ) -> Result<OutboundV1Channel<SP>, APIError>
7166 where ES::Target: EntropySource,
7167 F::Target: FeeEstimator
7169 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
7170 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
7171 // Protocol level safety check in place, although it should never happen because
7172 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
7173 return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below \
7174 implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
7177 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
7178 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
7179 let pubkeys = holder_signer.pubkeys().clone();
7182 context: ChannelContext::new_for_outbound_channel(
7186 counterparty_node_id,
7188 channel_value_satoshis,
7192 current_chain_height,
7193 outbound_scid_alias,
7194 temporary_channel_id,
7195 holder_selected_channel_reserve_satoshis,
7200 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
7205 /// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
7206 fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
7207 let counterparty_keys = self.context.build_remote_transaction_keys();
7208 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
7209 let signature = match &self.context.holder_signer {
7210 // TODO (taproot|arik): move match into calling method for Taproot
7211 ChannelSignerType::Ecdsa(ecdsa) => {
7212 ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
7213 .map(|(sig, _)| sig).ok()?
7215 // TODO (taproot|arik)
7220 if self.context.signer_pending_funding {
7221 log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
7222 self.context.signer_pending_funding = false;
7225 Some(msgs::FundingCreated {
7226 temporary_channel_id: self.context.temporary_channel_id.unwrap(),
7227 funding_txid: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
7228 funding_output_index: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
7231 partial_signature_with_nonce: None,
7233 next_local_nonce: None,
7237 /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
7238 /// a funding_created message for the remote peer.
7239 /// Panics if called at some time other than immediately after initial handshake, if called twice,
7240 /// or if called on an inbound channel.
7241 /// Note that channel_id changes during this call!
7242 /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
7243 /// If an Err is returned, it is a ChannelError::Close.
7244 pub fn get_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
7245 -> Result<Option<msgs::FundingCreated>, (Self, ChannelError)> where L::Target: Logger {
7246 if !self.context.is_outbound() {
7247 panic!("Tried to create outbound funding_created message on an inbound channel!");
7250 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7251 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7253 panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
7255 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7256 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7257 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7258 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7261 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
7262 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
7264 // Now that we're past error-generating stuff, update our local state:
7266 self.context.channel_state = ChannelState::FundingNegotiated;
7267 self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
7269 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
7270 // We can skip this if it is a zero-conf channel.
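// (Consensus rules make coinbase outputs unspendable until they have 100
// confirmations, which is what `COINBASE_MATURITY` encodes.)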
7271 if funding_transaction.is_coin_base() &&
7272 self.context.minimum_depth.unwrap_or(0) > 0 &&
7273 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
7274 self.context.minimum_depth = Some(COINBASE_MATURITY);
7277 self.context.funding_transaction = Some(funding_transaction);
7278 self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
7280 let funding_created = self.get_funding_created_msg(logger);
7281 if funding_created.is_none() {
7282 #[cfg(not(async_signing))] {
7283 panic!("Failed to get signature for new funding creation");
7285 #[cfg(async_signing)] {
7286 if !self.context.signer_pending_funding {
7287 log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
7288 self.context.signer_pending_funding = true;
7296 /// If we receive an error message, it may only be a rejection of the channel type we tried,
7297 /// not of our ability to open any channel at all. Thus, on error, we should first call this
7298 /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
7299 pub(crate) fn maybe_handle_error_without_close<F: Deref>(
7300 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
7301 ) -> Result<msgs::OpenChannel, ()>
7303 F::Target: FeeEstimator
7305 if !self.context.is_outbound() ||
7307 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7308 if flags == NegotiatingFundingFlags::OUR_INIT_SENT
7313 if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
7314 // We've exhausted our options
7317 // We support opening a few different types of channels. Try removing our additional
7318 // features one by one until we've either arrived at our default or the counterparty has accepted one.
7321 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
7322 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
7323 // checks whether the counterparty supports every feature, this would only happen if the
7324 // counterparty is advertising the feature, but rejecting channels proposing the feature for some reason.
7326 if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
7327 self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
7328 self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
7329 assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
7330 } else if self.context.channel_type.supports_scid_privacy() {
7331 self.context.channel_type.clear_scid_privacy();
7333 self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
7335 self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
7336 Ok(self.get_open_channel(chain_hash))
7339 pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
7340 if !self.context.is_outbound() {
7341 panic!("Tried to open a channel for an inbound channel?");
7343 if self.context.have_received_message() {
7344 panic!("Cannot generate an open_channel after we've moved forward");
7347 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7348 panic!("Tried to send an open_channel for a channel that has already advanced");
7351 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
7352 let keys = self.context.get_holder_pubkeys();
7355 common_fields: msgs::CommonOpenChannelFields {
7357 temporary_channel_id: self.context.channel_id,
7358 funding_satoshis: self.context.channel_value_satoshis,
7359 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
7360 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
7361 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
7362 commitment_feerate_sat_per_1000_weight: self.context.feerate_per_kw as u32,
7363 to_self_delay: self.context.get_holder_selected_contest_delay(),
7364 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
7365 funding_pubkey: keys.funding_pubkey,
7366 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
7367 payment_basepoint: keys.payment_point,
7368 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
7369 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
7370 first_per_commitment_point,
7371 channel_flags: if self.context.config.announced_channel {1} else {0},
7372 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
7373 Some(script) => script.clone().into_inner(),
7374 None => Builder::new().into_script(),
7376 channel_type: Some(self.context.channel_type.clone()),
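// Everything we don't explicitly keep for ourselves is pushed to the counterparty at
// open, hence channel value minus our local balance below.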
7378 push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
7379 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
7384 pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
7385 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
7387 // Check sanity of message fields:
7388 if !self.context.is_outbound() {
7389 return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
7391 if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
7392 return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
7394 if msg.common_fields.dust_limit_satoshis > 21000000 * 100000000 {
7395 return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.common_fields.dust_limit_satoshis)));
7397 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
7398 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
7400 if msg.common_fields.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
7401 return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.common_fields.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
7403 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
7404 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
7405 msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
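// The most we could ever place in flight toward the peer is the channel value less
// the reserve they require us to maintain, so an `htlc_minimum_msat` at or above
// that leaves the channel unable to carry any HTLC at all.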
7407 let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
7408 if msg.common_fields.htlc_minimum_msat >= full_channel_value_msat {
7409 return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.common_fields.htlc_minimum_msat, full_channel_value_msat)));
7411 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
7412 if msg.common_fields.to_self_delay > max_delay_acceptable {
7413 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.common_fields.to_self_delay)));
7415 if msg.common_fields.max_accepted_htlcs < 1 {
7416 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
7418 if msg.common_fields.max_accepted_htlcs > MAX_HTLCS {
7419 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.common_fields.max_accepted_htlcs, MAX_HTLCS)));
7422 // Now check against optional parameters as set by config...
7423 if msg.common_fields.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
7424 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.common_fields.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
7426 if msg.common_fields.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
7427 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.common_fields.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
7429 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
7430 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
7432 if msg.common_fields.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
7433 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.common_fields.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
7435 if msg.common_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
7436 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
7438 if msg.common_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
7439 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
7441 if msg.common_fields.minimum_depth > peer_limits.max_minimum_depth {
7442 return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.common_fields.minimum_depth)));
7445 if let Some(ty) = &msg.common_fields.channel_type {
7446 if *ty != self.context.channel_type {
7447 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
7449 } else if their_features.supports_channel_type() {
7450 // Assume they've accepted the channel type as they said they understand it.
7452 let channel_type = ChannelTypeFeatures::from_init(&their_features);
7453 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
7454 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
7456 self.context.channel_type = channel_type.clone();
7457 self.context.channel_transaction_parameters.channel_type_features = channel_type;
7460 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
7461 match &msg.common_fields.shutdown_scriptpubkey {
7462 &Some(ref script) => {
7463 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
7464 if script.len() == 0 {
7467 if !script::is_bolt2_compliant(&script, their_features) {
7468 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
7470 Some(script.clone())
7473 // Peer is signaling upfront shutdown but didn't opt out via the correct mechanism (a 0-length script). The peer looks buggy, so we fail the channel.
7475 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
7480 self.context.counterparty_dust_limit_satoshis = msg.common_fields.dust_limit_satoshis;
7481 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.common_fields.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
7482 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
7483 self.context.counterparty_htlc_minimum_msat = msg.common_fields.htlc_minimum_msat;
7484 self.context.counterparty_max_accepted_htlcs = msg.common_fields.max_accepted_htlcs;
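// If we're willing to trust our own funding transaction (0conf), take the peer's
// `minimum_depth` as-is, even if it's zero; otherwise insist on at least one
// confirmation.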
7486 if peer_limits.trust_own_funding_0conf {
7487 self.context.minimum_depth = Some(msg.common_fields.minimum_depth);
7489 self.context.minimum_depth = Some(cmp::max(1, msg.common_fields.minimum_depth));
7492 let counterparty_pubkeys = ChannelPublicKeys {
7493 funding_pubkey: msg.common_fields.funding_pubkey,
7494 revocation_basepoint: RevocationBasepoint::from(msg.common_fields.revocation_basepoint),
7495 payment_point: msg.common_fields.payment_basepoint,
7496 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.common_fields.delayed_payment_basepoint),
7497 htlc_basepoint: HtlcBasepoint::from(msg.common_fields.htlc_basepoint)
7500 self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
7501 selected_contest_delay: msg.common_fields.to_self_delay,
7502 pubkeys: counterparty_pubkeys,
7505 self.context.counterparty_cur_commitment_point = Some(msg.common_fields.first_per_commitment_point);
7506 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
7508 self.context.channel_state = ChannelState::NegotiatingFunding(
7509 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
7511 self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
7516 /// Handles a funding_signed message from the remote end.
7517 /// If this call is successful, broadcast the funding transaction (and not before!)
7518 pub fn funding_signed<L: Deref>(
7519 mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
7520 ) -> Result<(Channel<SP>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (OutboundV1Channel<SP>, ChannelError)>
7524 if !self.context.is_outbound() {
7525 return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
7527 if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
7528 return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
7530 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7531 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7532 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7533 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7536 let funding_script = self.context.get_funding_redeemscript();
7538 let counterparty_keys = self.context.build_remote_transaction_keys();
7539 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
7540 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
7541 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
7543 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
7544 &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
7546 let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
7547 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
7549 let trusted_tx = initial_commitment_tx.trust();
7550 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
7551 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
7552 // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
7553 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
7554 return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));
7558 let holder_commitment_tx = HolderCommitmentTransaction::new(
7559 initial_commitment_tx,
7562 &self.context.get_holder_pubkeys().funding_pubkey,
7563 self.context.counterparty_funding_pubkey()
7567 let validated = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
7568 if validated.is_err() {
7569 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
7572 let funding_redeemscript = self.context.get_funding_redeemscript();
7573 let funding_txo = self.context.get_funding_txo().unwrap();
7574 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
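// Per BOLT 3, commitment numbers are obscured by XORing them with the lower 48 bits
// of SHA256(open_node_payment_basepoint || accept_node_payment_basepoint) before
// being encoded into the commitment transaction's locktime and sequence fields.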
7575 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
7576 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
7577 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
7578 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
7579 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
7580 shutdown_script, self.context.get_holder_selected_contest_delay(),
7581 &self.context.destination_script, (funding_txo, funding_txo_script),
7582 &self.context.channel_transaction_parameters,
7583 funding_redeemscript.clone(), self.context.channel_value_satoshis,
7585 holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id());
7586 channel_monitor.provide_initial_counterparty_commitment_tx(
7587 counterparty_initial_bitcoin_tx.txid, Vec::new(),
7588 self.context.cur_counterparty_commitment_transaction_number,
7589 self.context.counterparty_cur_commitment_point.unwrap(),
7590 counterparty_initial_commitment_tx.feerate_per_kw(),
7591 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7592 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
7594 assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have not had any monitor(s) yet to fail an update!
7595 if self.context.is_batch_funding() {
7596 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
7598 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
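// Commitment numbers count *down* from `INITIAL_COMMITMENT_NUMBER` (2^48 - 1); now
// that the initial commitment transactions have been exchanged, advance both sides.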
7600 self.context.cur_holder_commitment_transaction_number -= 1;
7601 self.context.cur_counterparty_commitment_transaction_number -= 1;
7603 log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
7605 let mut channel = Channel {
7606 context: self.context,
7607 #[cfg(dual_funding)]
7608 dual_funding_channel_context: None,
7611 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
7612 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7613 Ok((channel, channel_monitor))
7616 /// Indicates that the signer may have some signatures for us, so we should retry if we're stuck.
7618 #[cfg(async_signing)]
7619 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
7620 if self.context.signer_pending_funding && self.context.is_outbound() {
7621 log_trace!(logger, "Signer unblocked a funding_created");
7622 self.get_funding_created_msg(logger)
7627 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
7628 pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
7629 pub context: ChannelContext<SP>,
7630 pub unfunded_context: UnfundedChannelContext,
7633 /// Fetches the [`ChannelTypeFeatures`] that will be used for a channel built from a given
7634 /// [`msgs::CommonOpenChannelFields`].
7635 pub(super) fn channel_type_from_open_channel(
7636 common_fields: &msgs::CommonOpenChannelFields, their_features: &InitFeatures,
7637 our_supported_features: &ChannelTypeFeatures
7638 ) -> Result<ChannelTypeFeatures, ChannelError> {
7639 if let Some(channel_type) = &common_fields.channel_type {
7640 if channel_type.supports_any_optional_bits() {
7641 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
7644 // We only support the channel types defined by the `ChannelManager` in
7645 // `provided_channel_type_features`. The channel type must always support
7646 // `static_remote_key`.
7647 if !channel_type.requires_static_remote_key() {
7648 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
7650 // Make sure we support all of the features behind the channel type.
7651 if !channel_type.is_subset(our_supported_features) {
7652 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
7654 let announced_channel = (common_fields.channel_flags & 1) == 1;
7655 if channel_type.requires_scid_privacy() && announced_channel {
7656 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
7658 Ok(channel_type.clone())
7660 let channel_type = ChannelTypeFeatures::from_init(&their_features);
7661 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
7662 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
7668 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
7669 /// Creates a new channel from a remote side's request for one.
7670 /// Assumes chain_hash has already been checked and corresponds with what we expect!
7671 pub fn new<ES: Deref, F: Deref, L: Deref>(
7672 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
7673 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
7674 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
7675 current_chain_height: u32, logger: &L, is_0conf: bool,
7676 ) -> Result<InboundV1Channel<SP>, ChannelError>
7677 where ES::Target: EntropySource,
7678 F::Target: FeeEstimator,
7681 let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.common_fields.temporary_channel_id));
7683 // First check the channel type is known, failing before we do anything else if we don't
7684 // support this channel type.
7685 let channel_type = channel_type_from_open_channel(&msg.common_fields, their_features, our_supported_features)?;
7687 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.common_fields.funding_satoshis, config);
7688 let counterparty_pubkeys = ChannelPublicKeys {
7689 funding_pubkey: msg.common_fields.funding_pubkey,
7690 revocation_basepoint: RevocationBasepoint::from(msg.common_fields.revocation_basepoint),
7691 payment_point: msg.common_fields.payment_basepoint,
7692 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.common_fields.delayed_payment_basepoint),
7693 htlc_basepoint: HtlcBasepoint::from(msg.common_fields.htlc_basepoint)
7697 context: ChannelContext::new_for_inbound_channel(
7701 counterparty_node_id,
7705 current_chain_height,
7710 counterparty_pubkeys,
7712 holder_selected_channel_reserve_satoshis,
7713 msg.channel_reserve_satoshis,
7715 msg.common_fields.clone(),
7717 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
7722 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
7723 /// should be sent back to the counterparty node.
7725 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7726 pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
7727 if self.context.is_outbound() {
7728 panic!("Tried to send accept_channel for an outbound channel?");
7731 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7732 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7734 panic!("Tried to send accept_channel after channel had moved forward");
7736 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7737 panic!("Tried to send an accept_channel for a channel that has already advanced");
7740 self.generate_accept_channel_message()
7743 /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
7744 /// inbound channel. If the intention is to accept an inbound channel, use
7745 /// [`InboundV1Channel::accept_inbound_channel`] instead.
7747 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7748 fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
7749 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
7750 let keys = self.context.get_holder_pubkeys();
7752 msgs::AcceptChannel {
7753 common_fields: msgs::CommonAcceptChannelFields {
7754 temporary_channel_id: self.context.channel_id,
7755 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
7756 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
7757 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
7758 minimum_depth: self.context.minimum_depth.unwrap(),
7759 to_self_delay: self.context.get_holder_selected_contest_delay(),
7760 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
7761 funding_pubkey: keys.funding_pubkey,
7762 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
7763 payment_basepoint: keys.payment_point,
7764 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
7765 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
7766 first_per_commitment_point,
7767 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
7768 Some(script) => script.clone().into_inner(),
7769 None => Builder::new().into_script(),
7771 channel_type: Some(self.context.channel_type.clone()),
7773 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
7775 next_local_nonce: None,
7779 /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
7780 /// inbound channel without accepting it.
7782 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7784 pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
7785 self.generate_accept_channel_message()
7788 fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
7789 let funding_script = self.context.get_funding_redeemscript();
7791 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
7792 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
7793 let trusted_tx = initial_commitment_tx.trust();
7794 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
7795 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
7796 // They sign the holder commitment transaction...
7797 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
7798 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
7799 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
7800 encode::serialize_hex(&funding_script), &self.context.channel_id());
7801 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
7803 Ok(initial_commitment_tx)
7806 pub fn funding_created<L: Deref>(
7807 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
7808 ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
7812 if self.context.is_outbound() {
7813 return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
7816 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7817 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7819 // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
7820 // remember the channel, so it's safe to just send an error_message here and drop the channel.
7822 return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
7824 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7825 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7826 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7827 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7830 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
7831 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
7832 // This is an externally observable change before we finish all our checks. In particular
7833 // check_funding_created_signature may fail.
7834 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
7836 let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
7838 Err(ChannelError::Close(e)) => {
7839 self.context.channel_transaction_parameters.funding_outpoint = None;
7840 return Err((self, ChannelError::Close(e)));
7843 // The only error we know how to handle is ChannelError::Close, so we fall over here
7844 // to make sure we don't continue with an inconsistent state.
7845 panic!("unexpected error type from check_funding_created_signature {:?}", e);
7849 let holder_commitment_tx = HolderCommitmentTransaction::new(
7850 initial_commitment_tx,
7853 &self.context.get_holder_pubkeys().funding_pubkey,
7854 self.context.counterparty_funding_pubkey()
7857 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
7858 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
7861 // Now that we're past error-generating stuff, update our local state:
7863 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
7864 self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
7865 self.context.cur_counterparty_commitment_transaction_number -= 1;
7866 self.context.cur_holder_commitment_transaction_number -= 1;
7868 let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
7870 let funding_redeemscript = self.context.get_funding_redeemscript();
7871 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
7872 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
7873 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
7874 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
7875 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
7876 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
7877 shutdown_script, self.context.get_holder_selected_contest_delay(),
7878 &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
7879 &self.context.channel_transaction_parameters,
7880 funding_redeemscript.clone(), self.context.channel_value_satoshis,
7882 holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id());
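// Note the `cur_counterparty_commitment_transaction_number + 1` below: the counter
// was already decremented above, so adding one back references the initial
// commitment transaction.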
7883 channel_monitor.provide_initial_counterparty_commitment_tx(
7884 counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
7885 self.context.cur_counterparty_commitment_transaction_number + 1,
7886 self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
7887 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7888 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
7890 log_info!(logger, "{} funding_signed for peer for channel {}",
7891 if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
7893 // Promote the channel to a full-fledged one now that we have updated the state and have a
7894 // `ChannelMonitor`.
7895 let mut channel = Channel {
7896 context: self.context,
7897 #[cfg(dual_funding)]
7898 dual_funding_channel_context: None,
7900 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
7901 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7903 Ok((channel, funding_signed, channel_monitor))
7907 // A not-yet-funded outbound (from holder) channel using V2 channel establishment.
7908 #[cfg(dual_funding)]
7909 pub(super) struct OutboundV2Channel<SP: Deref> where SP::Target: SignerProvider {
7910 pub context: ChannelContext<SP>,
7911 pub unfunded_context: UnfundedChannelContext,
7912 #[cfg(dual_funding)]
7913 pub dual_funding_context: DualFundingChannelContext,
7916 #[cfg(dual_funding)]
7917 impl<SP: Deref> OutboundV2Channel<SP> where SP::Target: SignerProvider {
7918 pub fn new<ES: Deref, F: Deref>(
7919 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
7920 counterparty_node_id: PublicKey, their_features: &InitFeatures, funding_satoshis: u64,
7921 user_id: u128, config: &UserConfig, current_chain_height: u32, outbound_scid_alias: u64,
7922 funding_confirmation_target: ConfirmationTarget,
7923 ) -> Result<OutboundV2Channel<SP>, APIError>
7924 where ES::Target: EntropySource,
7925 F::Target: FeeEstimator,
7927 let channel_keys_id = signer_provider.generate_channel_keys_id(false, funding_satoshis, user_id);
7928 let holder_signer = signer_provider.derive_channel_signer(funding_satoshis, channel_keys_id);
7929 let pubkeys = holder_signer.pubkeys().clone();
7931 let temporary_channel_id = Some(ChannelId::temporary_v2_from_revocation_basepoint(&pubkeys.revocation_basepoint));
7933 let holder_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis(
7934 funding_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
7936 let funding_feerate_sat_per_1000_weight = fee_estimator.bounded_sat_per_1000_weight(funding_confirmation_target);
7937 let funding_tx_locktime = current_chain_height;
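// Setting the funding transaction locktime to the current height matches common
// anti-fee-sniping wallet policy (presumably the motivation here); the chosen
// locktime is advertised to the peer in `open_channel2`.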
7940 context: ChannelContext::new_for_outbound_channel(
7944 counterparty_node_id,
7950 current_chain_height,
7951 outbound_scid_alias,
7952 temporary_channel_id,
7953 holder_selected_channel_reserve_satoshis,
7958 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 },
7959 dual_funding_context: DualFundingChannelContext {
7960 our_funding_satoshis: funding_satoshis,
7961 their_funding_satoshis: 0,
7962 funding_tx_locktime,
7963 funding_feerate_sat_per_1000_weight,
7969 pub fn get_open_channel_v2(&self, chain_hash: ChainHash) -> msgs::OpenChannelV2 {
7970 if self.context.have_received_message() {
7971 debug_assert!(false, "Cannot generate an open_channel2 after we've moved forward");
7974 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7975 debug_assert!(false, "Tried to send an open_channel2 for a channel that has already advanced");
7978 let first_per_commitment_point = self.context.holder_signer.as_ref()
7979 .get_per_commitment_point(self.context.cur_holder_commitment_transaction_number,
7980 &self.context.secp_ctx);
7981 let second_per_commitment_point = self.context.holder_signer.as_ref()
7982 .get_per_commitment_point(self.context.cur_holder_commitment_transaction_number - 1,
7983 &self.context.secp_ctx);
7984 let keys = self.context.get_holder_pubkeys();
7986 msgs::OpenChannelV2 {
7987 common_fields: msgs::CommonOpenChannelFields {
7989 temporary_channel_id: self.context.temporary_channel_id.unwrap(),
7990 funding_satoshis: self.context.channel_value_satoshis,
7991 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
7992 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
7993 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
7994 commitment_feerate_sat_per_1000_weight: self.context.feerate_per_kw,
7995 to_self_delay: self.context.get_holder_selected_contest_delay(),
7996 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
7997 funding_pubkey: keys.funding_pubkey,
7998 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
7999 payment_basepoint: keys.payment_point,
8000 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
8001 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
8002 first_per_commitment_point,
8003 channel_flags: if self.context.config.announced_channel {1} else {0},
8004 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
8005 Some(script) => script.clone().into_inner(),
8006 None => Builder::new().into_script(),
8008 channel_type: Some(self.context.channel_type.clone()),
8010 funding_feerate_sat_per_1000_weight: self.context.feerate_per_kw,
8011 second_per_commitment_point,
8012 locktime: self.dual_funding_context.funding_tx_locktime,
8013 require_confirmed_inputs: None,
8018 // A not-yet-funded inbound (from counterparty) channel using V2 channel establishment.
8019 #[cfg(dual_funding)]
8020 pub(super) struct InboundV2Channel<SP: Deref> where SP::Target: SignerProvider {
8021 pub context: ChannelContext<SP>,
8022 pub unfunded_context: UnfundedChannelContext,
8023 pub dual_funding_context: DualFundingChannelContext,
8026 #[cfg(dual_funding)]
8027 impl<SP: Deref> InboundV2Channel<SP> where SP::Target: SignerProvider {
8028 /// Creates a new dual-funded channel from a remote side's request for one.
8029 /// Assumes chain_hash has already been checked and corresponds with what we expect!
8030 pub fn new<ES: Deref, F: Deref, L: Deref>(
8031 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
8032 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
8033 their_features: &InitFeatures, msg: &msgs::OpenChannelV2, funding_satoshis: u64, user_id: u128,
8034 config: &UserConfig, current_chain_height: u32, logger: &L,
8035 ) -> Result<InboundV2Channel<SP>, ChannelError>
8036 where ES::Target: EntropySource,
8037 F::Target: FeeEstimator,
8040 let channel_value_satoshis = funding_satoshis.saturating_add(msg.common_fields.funding_satoshis);
8041 let counterparty_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis(
8042 channel_value_satoshis, msg.common_fields.dust_limit_satoshis);
8043 let holder_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis(
8044 channel_value_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
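// Note that in dual-funded channels the reserve is not negotiated; both sides
// compute it deterministically from the channel value, floored at the relevant dust
// limit, via `get_v2_channel_reserve_satoshis`.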
8046 // First check the channel type is known, failing before we do anything else if we don't
8047 // support this channel type.
8048 if msg.common_fields.channel_type.is_none() {
8049 return Err(ChannelError::Close(format!("Rejecting V2 channel {} missing channel_type",
8050 msg.common_fields.temporary_channel_id)))
8052 let channel_type = channel_type_from_open_channel(&msg.common_fields, their_features, our_supported_features)?;
8054 let counterparty_pubkeys = ChannelPublicKeys {
8055 funding_pubkey: msg.common_fields.funding_pubkey,
8056 revocation_basepoint: RevocationBasepoint(msg.common_fields.revocation_basepoint),
8057 payment_point: msg.common_fields.payment_basepoint,
8058 delayed_payment_basepoint: DelayedPaymentBasepoint(msg.common_fields.delayed_payment_basepoint),
8059 htlc_basepoint: HtlcBasepoint(msg.common_fields.htlc_basepoint)
8062 let mut context = ChannelContext::new_for_inbound_channel(
8066 counterparty_node_id,
8070 current_chain_height,
8076 counterparty_pubkeys,
8078 holder_selected_channel_reserve_satoshis,
8079 counterparty_selected_channel_reserve_satoshis,
8080 0 /* push_msat not used in dual-funding */,
8081 msg.common_fields.clone(),
8083 let channel_id = ChannelId::v2_from_revocation_basepoints(
8084 &context.get_holder_pubkeys().revocation_basepoint,
8085 &context.get_counterparty_pubkeys().revocation_basepoint);
8086 context.channel_id = channel_id;
8090 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 },
8091 dual_funding_context: DualFundingChannelContext {
8092 our_funding_satoshis: funding_satoshis,
8093 their_funding_satoshis: msg.common_fields.funding_satoshis,
8094 funding_tx_locktime: msg.locktime,
8095 funding_feerate_sat_per_1000_weight: msg.funding_feerate_sat_per_1000_weight,
8102 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannelV2`] message which
8103 /// should be sent back to the counterparty node.
8105 /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2
8106 pub fn accept_inbound_dual_funded_channel(&mut self) -> msgs::AcceptChannelV2 {
8107 if self.context.is_outbound() {
8108 debug_assert!(false, "Tried to send accept_channel for an outbound channel?");
8111 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
8112 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
8114 debug_assert!(false, "Tried to send accept_channel2 after channel had moved forward");
8116 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
8117 debug_assert!(false, "Tried to send an accept_channel2 for a channel that has already advanced");
8120 self.generate_accept_channel_v2_message()
8123 /// This function is used to explicitly generate a [`msgs::AcceptChannelV2`] message for an
8124 /// inbound channel. If the intention is to accept an inbound channel, use
8125 /// [`InboundV2Channel::accept_inbound_dual_funded_channel`] instead.
8127 /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2
8128 fn generate_accept_channel_v2_message(&self) -> msgs::AcceptChannelV2 {
8129 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(
8130 self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
8131 let second_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(
8132 self.context.cur_holder_commitment_transaction_number - 1, &self.context.secp_ctx);
8133 let keys = self.context.get_holder_pubkeys();
8135 msgs::AcceptChannelV2 {
8136 common_fields: msgs::CommonAcceptChannelFields {
8137 temporary_channel_id: self.context.temporary_channel_id.unwrap(),
8138 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
8139 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
8140 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
8141 minimum_depth: self.context.minimum_depth.unwrap(),
8142 to_self_delay: self.context.get_holder_selected_contest_delay(),
8143 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
8144 funding_pubkey: keys.funding_pubkey,
8145 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
8146 payment_basepoint: keys.payment_point,
8147 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
8148 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
8149 first_per_commitment_point,
8150 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
8151 Some(script) => script.clone().into_inner(),
8152 None => Builder::new().into_script(),
8154 channel_type: Some(self.context.channel_type.clone()),
8156 funding_satoshis: self.dual_funding_context.our_funding_satoshis,
8157 second_per_commitment_point,
8158 require_confirmed_inputs: None,
8162 /// Enables the possibility for tests to extract a [`msgs::AcceptChannelV2`] message for an
8163 /// inbound channel without accepting it.
8165 /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2
8167 pub fn get_accept_channel_v2_message(&self) -> msgs::AcceptChannelV2 {
8168 self.generate_accept_channel_v2_message()
8172 // Unfunded channel utilities
8174 fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
8175 // The default channel type (ie the first one we try) depends on whether the channel is
8176 // public - if it is, we just go with `only_static_remotekey` as it's the only option
8177 // available. If it's private, we first try `scid_privacy` as it provides better privacy
8178 // with no other changes, and fall back to `only_static_remotekey`.
8179 let mut ret = ChannelTypeFeatures::only_static_remote_key();
8180 if !config.channel_handshake_config.announced_channel &&
8181 config.channel_handshake_config.negotiate_scid_privacy &&
8182 their_features.supports_scid_privacy() {
8183 ret.set_scid_privacy_required();
8186 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
8187 // set it now. If they don't understand it, we'll fall back to our default of
8188 // `only_static_remotekey`.
8189 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
8190 their_features.supports_anchors_zero_fee_htlc_tx() {
8191 ret.set_anchors_zero_fee_htlc_tx_required();
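// Note that the feature-removal order in `maybe_handle_error_without_close` should
// remain the mirror image of the order in which features are added here.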
8197 const SERIALIZATION_VERSION: u8 = 3;
8198 const MIN_SERIALIZATION_VERSION: u8 = 3;
8200 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
8206 impl Writeable for ChannelUpdateStatus {
8207 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
8208 // We only care about writing out the current state as it was announced, ie only either
8209 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
8210 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
8212 ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
8213 ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
8214 ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
8215 ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
8221 impl Readable for ChannelUpdateStatus {
8222 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
8223 Ok(match <u8 as Readable>::read(reader)? {
8224 0 => ChannelUpdateStatus::Enabled,
8225 1 => ChannelUpdateStatus::Disabled,
8226 _ => return Err(DecodeError::InvalidValue),
8231 impl Writeable for AnnouncementSigsState {
8232 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
8233 // We only care about writing out the current state as if we had just disconnected, at
8234 // which point we always set anything but `PeerReceived` back to `NotSent`.
8236 AnnouncementSigsState::NotSent => 0u8.write(writer),
8237 AnnouncementSigsState::MessageSent => 0u8.write(writer),
8238 AnnouncementSigsState::Committed => 0u8.write(writer),
8239 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
8244 impl Readable for AnnouncementSigsState {
8245 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
8246 Ok(match <u8 as Readable>::read(reader)? {
8247 0 => AnnouncementSigsState::NotSent,
8248 1 => AnnouncementSigsState::PeerReceived,
8249 _ => return Err(DecodeError::InvalidValue),
8254 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
8255 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
8256 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been called.
8259 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
8261 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
8262 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
8263 // the low bytes now and the optional high bytes later.
8264 let user_id_low = self.context.user_id as u64;
8265 user_id_low.write(writer)?;
8267 // Version 1 deserializers expected to read parts of the config object here. Version 2
8268 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
8269 // `minimum_depth` we simply write dummy values here.
8270 writer.write_all(&[0; 8])?;
8272 self.context.channel_id.write(writer)?;
8274 let mut channel_state = self.context.channel_state;
8275 if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
8276 channel_state.set_peer_disconnected();
8278 debug_assert!(false, "Pre-funded/shutdown channels should not be written");
8280 channel_state.to_u32().write(writer)?;
8282 self.context.channel_value_satoshis.write(writer)?;
8284 self.context.latest_monitor_update_id.write(writer)?;
8286 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
8287 // deserialized from that format.
8288 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
8289 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
8290 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
8292 self.context.destination_script.write(writer)?;
8294 self.context.cur_holder_commitment_transaction_number.write(writer)?;
8295 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
8296 self.context.value_to_self_msat.write(writer)?;
8298 let mut dropped_inbound_htlcs = 0;
8299 for htlc in self.context.pending_inbound_htlcs.iter() {
8300 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
8301 dropped_inbound_htlcs += 1;
8304 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
8305 for htlc in self.context.pending_inbound_htlcs.iter() {
8306 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
8309 htlc.htlc_id.write(writer)?;
8310 htlc.amount_msat.write(writer)?;
8311 htlc.cltv_expiry.write(writer)?;
8312 htlc.payment_hash.write(writer)?;
8314 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
8315 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
8317 htlc_state.write(writer)?;
8319 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
8321 htlc_state.write(writer)?;
8323 &InboundHTLCState::Committed => {
8326 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
8328 removal_reason.write(writer)?;
8333 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
8334 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
8335 let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
8337 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
8338 for htlc in self.context.pending_outbound_htlcs.iter() {
8339 htlc.htlc_id.write(writer)?;
8340 htlc.amount_msat.write(writer)?;
8341 htlc.cltv_expiry.write(writer)?;
8342 htlc.payment_hash.write(writer)?;
8343 htlc.source.write(writer)?;
8345 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
8347 onion_packet.write(writer)?;
8349 &OutboundHTLCState::Committed => {
8352 &OutboundHTLCState::RemoteRemoved(_) => {
8353 // Treat this as Committed because we haven't received the CS - they'll re-send the
8354 // claim/fail on reconnect, along with the (hopefully) missing CS.
8357 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
8359 if let OutboundHTLCOutcome::Success(preimage) = outcome {
8360 preimages.push(preimage);
8362 let reason: Option<&HTLCFailReason> = outcome.into();
8363 reason.write(writer)?;
8365 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
8367 if let OutboundHTLCOutcome::Success(preimage) = outcome {
8368 preimages.push(preimage);
8370 let reason: Option<&HTLCFailReason> = outcome.into();
8371 reason.write(writer)?;
8374 pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat);
8375 pending_outbound_blinding_points.push(htlc.blinding_point);
8378 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
8379 let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
8380 // Vec of (htlc_id, failure_code, sha256_of_onion)
8381 let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new();
8382 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
8383 for update in self.context.holding_cell_htlc_updates.iter() {
8385 &HTLCUpdateAwaitingACK::AddHTLC {
8386 ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
8387 blinding_point, skimmed_fee_msat,
8390 amount_msat.write(writer)?;
8391 cltv_expiry.write(writer)?;
8392 payment_hash.write(writer)?;
8393 source.write(writer)?;
8394 onion_routing_packet.write(writer)?;
8396 holding_cell_skimmed_fees.push(skimmed_fee_msat);
8397 holding_cell_blinding_points.push(blinding_point);
8399 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
8401 payment_preimage.write(writer)?;
8402 htlc_id.write(writer)?;
8404 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
8406 htlc_id.write(writer)?;
8407 err_packet.write(writer)?;
8409 &HTLCUpdateAwaitingACK::FailMalformedHTLC {
8410 htlc_id, failure_code, sha256_of_onion
8412 // We don't want to break downgrading by adding a new variant, so write a dummy
8413 // `::FailHTLC` variant and write the real malformed error as an optional TLV.
8414 malformed_htlcs.push((htlc_id, failure_code, sha256_of_onion));
8416 let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
8418 htlc_id.write(writer)?;
8419 dummy_err_packet.write(writer)?;
8424 match self.context.resend_order {
8425 RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
8426 RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
8429 self.context.monitor_pending_channel_ready.write(writer)?;
8430 self.context.monitor_pending_revoke_and_ack.write(writer)?;
8431 self.context.monitor_pending_commitment_signed.write(writer)?;
8433 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
8434 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
8435 pending_forward.write(writer)?;
8436 htlc_id.write(writer)?;
8439 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
8440 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
8441 htlc_source.write(writer)?;
8442 payment_hash.write(writer)?;
8443 fail_reason.write(writer)?;
8446 if self.context.is_outbound() {
8447 self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
8448 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
8449 Some(feerate).write(writer)?;
8451 // As for inbound HTLCs, if the update was only announced and never committed in a
8452 // commitment_signed, drop it.
8453 None::<u32>.write(writer)?;
8455 self.context.holding_cell_update_fee.write(writer)?;
8457 self.context.next_holder_htlc_id.write(writer)?;
8458 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
8459 self.context.update_time_counter.write(writer)?;
8460 self.context.feerate_per_kw.write(writer)?;
// Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
// however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
// `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
// consider the stale state on reload.
0u8.write(writer)?;
8468 self.context.funding_tx_confirmed_in.write(writer)?;
8469 self.context.funding_tx_confirmation_height.write(writer)?;
8470 self.context.short_channel_id.write(writer)?;
8472 self.context.counterparty_dust_limit_satoshis.write(writer)?;
8473 self.context.holder_dust_limit_satoshis.write(writer)?;
8474 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
8476 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
8477 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
8479 self.context.counterparty_htlc_minimum_msat.write(writer)?;
8480 self.context.holder_htlc_minimum_msat.write(writer)?;
8481 self.context.counterparty_max_accepted_htlcs.write(writer)?;
8483 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
8484 self.context.minimum_depth.unwrap_or(0).write(writer)?;
match &self.context.counterparty_forwarding_info {
    Some(info) => {
        1u8.write(writer)?;
        info.fee_base_msat.write(writer)?;
        info.fee_proportional_millionths.write(writer)?;
        info.cltv_expiry_delta.write(writer)?;
    },
    None => 0u8.write(writer)?
}
8496 self.context.channel_transaction_parameters.write(writer)?;
8497 self.context.funding_transaction.write(writer)?;
8499 self.context.counterparty_cur_commitment_point.write(writer)?;
8500 self.context.counterparty_prev_commitment_point.write(writer)?;
8501 self.context.counterparty_node_id.write(writer)?;
8503 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
8505 self.context.commitment_secrets.write(writer)?;
8507 self.context.channel_update_status.write(writer)?;
8509 #[cfg(any(test, fuzzing))]
8510 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
8511 #[cfg(any(test, fuzzing))]
for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
    htlc.write(writer)?;
}
// If the channel type is something other than only-static-remote-key, then we need to have
// older clients fail to deserialize this channel at all. If the type is
// only-static-remote-key, we simply consider it "default" and don't write the channel type
// at all.
let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
    Some(&self.context.channel_type) } else { None };
// The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
// the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
// a different percentage of the channel value than 10%, which older versions of LDK used
// to set it to before the percentage was made configurable.
8527 let serialized_holder_selected_reserve =
8528 if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
8529 { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
8531 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
8532 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
8533 let serialized_holder_htlc_max_in_flight =
8534 if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
8535 { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
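// Worked example (editorial, with assumed round numbers): for a 1_000_000 sat
// channel, the legacy default was 10% of the channel value, i.e.
// 1_000_000 * 1000 * 10 / 100 = 100_000_000 msat. A node still using that default
// writes `None` here and relies on the same recomputation at read time, keeping
// old serializations compact.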
8537 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
8538 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
8540 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
8541 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
8542 // we write the high bytes as an option here.
8543 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
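// For example (sketch): `user_id = (7u128 << 64) | 42` is written as the legacy
// low-bytes field `42u64` plus `user_id_high_opt = Some(7u64)` in TLV type 25, so
// pre-0.0.113 readers still see a valid u64 where they expect one.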
8545 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
8547 write_tlv_fields!(writer, {
8548 (0, self.context.announcement_sigs, option),
8549 // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
8550 // default value instead of being Option<>al. Thus, to maintain compatibility we write
8551 // them twice, once with their original default values above, and once as an option
8552 // here. On the read side, old versions will simply ignore the odd-type entries here,
// and new versions map the default values to None and allow the TLV entries here to
// override that.
8555 (1, self.context.minimum_depth, option),
8556 (2, chan_type, option),
8557 (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
8558 (4, serialized_holder_selected_reserve, option),
8559 (5, self.context.config, required),
8560 (6, serialized_holder_htlc_max_in_flight, option),
8561 (7, self.context.shutdown_scriptpubkey, option),
8562 (8, self.context.blocked_monitor_updates, optional_vec),
8563 (9, self.context.target_closing_feerate_sats_per_kw, option),
8564 (11, self.context.monitor_pending_finalized_fulfills, required_vec),
8565 (13, self.context.channel_creation_height, required),
8566 (15, preimages, required_vec),
8567 (17, self.context.announcement_sigs_state, required),
8568 (19, self.context.latest_inbound_scid_alias, option),
8569 (21, self.context.outbound_scid_alias, required),
8570 (23, channel_ready_event_emitted, option),
8571 (25, user_id_high_opt, option),
8572 (27, self.context.channel_keys_id, required),
8573 (28, holder_max_accepted_htlcs, option),
8574 (29, self.context.temporary_channel_id, option),
8575 (31, channel_pending_event_emitted, option),
8576 (35, pending_outbound_skimmed_fees, optional_vec),
8577 (37, holding_cell_skimmed_fees, optional_vec),
8578 (38, self.context.is_batch_funding, option),
8579 (39, pending_outbound_blinding_points, optional_vec),
8580 (41, holding_cell_blinding_points, optional_vec),
8581 (43, malformed_htlcs, optional_vec), // Added in 0.0.119
8582 (45, self.context.local_initiated_shutdown, option), // Added in 0.0.122
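// Editorial note on the numbering above: `write_tlv_fields!` follows the usual
// even/odd TLV rule, so odd types (1, 3, 5, ...) can be safely ignored by older
// readers while unknown even types (2, 4, 6, ...) force a deserialization failure.
// E.g. `(2, chan_type, option)` is deliberately even so that, per the comment on
// `chan_type` above, an old client cannot silently misinterpret a non-default
// channel type.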
8589 const MAX_ALLOC_SIZE: usize = 64*1024;
impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
    where
        ES::Target: EntropySource,
        SP::Target: SignerProvider
{
    fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
8596 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
8597 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
8599 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
8600 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
8601 // the low bytes now and the high bytes later.
8602 let user_id_low: u64 = Readable::read(reader)?;
let mut config = Some(LegacyChannelConfig::default());
if ver == 1 {
    // Read the old serialization of the ChannelConfig from version 0.0.98.
    config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
    config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
    config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
    config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
} else {
    // Read the 8 bytes of backwards-compatibility ChannelConfig data.
    let mut _val: u64 = Readable::read(reader)?;
}
8616 let channel_id = Readable::read(reader)?;
8617 let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?;
8618 let channel_value_satoshis = Readable::read(reader)?;
8620 let latest_monitor_update_id = Readable::read(reader)?;
let mut keys_data = None;
if ver <= 2 {
    // Read the serialized signer bytes. We'll choose to deserialize them or not based on whether
    // the `channel_keys_id` TLV is present below.
    let keys_len: u32 = Readable::read(reader)?;
    keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
    while keys_data.as_ref().unwrap().len() != keys_len as usize {
        // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
        let mut data = [0; 1024];
        let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
        reader.read_exact(read_slice)?;
        keys_data.as_mut().unwrap().extend_from_slice(read_slice);
    }
}
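// The same bounded-allocation pattern as a standalone sketch (hypothetical `len`
// and `reader`), useful when deserializing any untrusted length prefix:
//
//     let mut buf = Vec::with_capacity(cmp::min(len, MAX_ALLOC_SIZE));
//     while buf.len() != len {
//         let mut chunk = [0u8; 1024];
//         let take = cmp::min(1024, len - buf.len());
//         reader.read_exact(&mut chunk[..take])?;
//         buf.extend_from_slice(&chunk[..take]);
//     }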
// Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
    Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
    Err(_) => None,
};
let destination_script = Readable::read(reader)?;
8644 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
8645 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
8646 let value_to_self_msat = Readable::read(reader)?;
8648 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
8650 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
8651 for _ in 0..pending_inbound_htlc_count {
8652 pending_inbound_htlcs.push(InboundHTLCOutput {
8653 htlc_id: Readable::read(reader)?,
8654 amount_msat: Readable::read(reader)?,
8655 cltv_expiry: Readable::read(reader)?,
8656 payment_hash: Readable::read(reader)?,
state: match <u8 as Readable>::read(reader)? {
    1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
    2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
    3 => InboundHTLCState::Committed,
    4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
    _ => return Err(DecodeError::InvalidValue),
},
});
}
8667 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
8668 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
8669 for _ in 0..pending_outbound_htlc_count {
8670 pending_outbound_htlcs.push(OutboundHTLCOutput {
8671 htlc_id: Readable::read(reader)?,
8672 amount_msat: Readable::read(reader)?,
8673 cltv_expiry: Readable::read(reader)?,
8674 payment_hash: Readable::read(reader)?,
8675 source: Readable::read(reader)?,
state: match <u8 as Readable>::read(reader)? {
    0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
    1 => OutboundHTLCState::Committed,
    2 => {
        let option: Option<HTLCFailReason> = Readable::read(reader)?;
        OutboundHTLCState::RemoteRemoved(option.into())
    },
    3 => {
        let option: Option<HTLCFailReason> = Readable::read(reader)?;
        OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
    },
    4 => {
        let option: Option<HTLCFailReason> = Readable::read(reader)?;
        OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
    },
    _ => return Err(DecodeError::InvalidValue),
},
skimmed_fee_msat: None,
blinding_point: None,
});
}
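// Note: `skimmed_fee_msat` and `blinding_point` are deliberately `None` here; the
// legacy per-HTLC encoding predates them, and they are backfilled from TLV types
// 35 and 39 respectively once `read_tlv_fields!` below has run.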
8698 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
8699 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
8700 for _ in 0..holding_cell_htlc_update_count {
holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
    0 => HTLCUpdateAwaitingACK::AddHTLC {
        amount_msat: Readable::read(reader)?,
        cltv_expiry: Readable::read(reader)?,
        payment_hash: Readable::read(reader)?,
        source: Readable::read(reader)?,
        onion_routing_packet: Readable::read(reader)?,
        skimmed_fee_msat: None,
        blinding_point: None,
    },
    1 => HTLCUpdateAwaitingACK::ClaimHTLC {
        payment_preimage: Readable::read(reader)?,
        htlc_id: Readable::read(reader)?,
    },
    2 => HTLCUpdateAwaitingACK::FailHTLC {
        htlc_id: Readable::read(reader)?,
        err_packet: Readable::read(reader)?,
    },
    _ => return Err(DecodeError::InvalidValue),
});
}
let resend_order = match <u8 as Readable>::read(reader)? {
    0 => RAACommitmentOrder::CommitmentFirst,
    1 => RAACommitmentOrder::RevokeAndACKFirst,
    _ => return Err(DecodeError::InvalidValue),
};
8729 let monitor_pending_channel_ready = Readable::read(reader)?;
8730 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
8731 let monitor_pending_commitment_signed = Readable::read(reader)?;
8733 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
8734 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
8735 for _ in 0..monitor_pending_forwards_count {
monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
}
8739 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
8740 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
8741 for _ in 0..monitor_pending_failures_count {
monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
}
8745 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
8747 let holding_cell_update_fee = Readable::read(reader)?;
8749 let next_holder_htlc_id = Readable::read(reader)?;
8750 let next_counterparty_htlc_id = Readable::read(reader)?;
8751 let update_time_counter = Readable::read(reader)?;
8752 let feerate_per_kw = Readable::read(reader)?;
// Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
// however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
// `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
// consider the stale state on reload.
match <u8 as Readable>::read(reader)? {
    0 => {},
    1 => {
        let _: u32 = Readable::read(reader)?;
        let _: u64 = Readable::read(reader)?;
        let _: Signature = Readable::read(reader)?;
    },
    _ => return Err(DecodeError::InvalidValue),
}
8768 let funding_tx_confirmed_in = Readable::read(reader)?;
8769 let funding_tx_confirmation_height = Readable::read(reader)?;
8770 let short_channel_id = Readable::read(reader)?;
8772 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
8773 let holder_dust_limit_satoshis = Readable::read(reader)?;
8774 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
let mut counterparty_selected_channel_reserve_satoshis = None;
if ver == 1 {
    // Read the old serialization from version 0.0.98.
    counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
} else {
    // Read the 8 bytes of backwards-compatibility data.
    let _dummy: u64 = Readable::read(reader)?;
}
8783 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
8784 let holder_htlc_minimum_msat = Readable::read(reader)?;
8785 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
let mut minimum_depth = None;
if ver == 1 {
    // Read the old serialization from version 0.0.98.
    minimum_depth = Some(Readable::read(reader)?);
} else {
    // Read the 4 bytes of backwards-compatibility data.
    let _dummy: u32 = Readable::read(reader)?;
}
let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
    0 => None,
    1 => Some(CounterpartyForwardingInfo {
        fee_base_msat: Readable::read(reader)?,
        fee_proportional_millionths: Readable::read(reader)?,
        cltv_expiry_delta: Readable::read(reader)?,
    }),
    _ => return Err(DecodeError::InvalidValue),
};
8806 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
8807 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
8809 let counterparty_cur_commitment_point = Readable::read(reader)?;
8811 let counterparty_prev_commitment_point = Readable::read(reader)?;
8812 let counterparty_node_id = Readable::read(reader)?;
8814 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
8815 let commitment_secrets = Readable::read(reader)?;
8817 let channel_update_status = Readable::read(reader)?;
#[cfg(any(test, fuzzing))]
let mut historical_inbound_htlc_fulfills = new_hash_set();
#[cfg(any(test, fuzzing))]
{
    let htlc_fulfills_len: u64 = Readable::read(reader)?;
    for _ in 0..htlc_fulfills_len {
        assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
    }
}
let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
    Some((feerate, if channel_parameters.is_outbound_from_holder {
        FeeUpdateState::Outbound
    } else {
        FeeUpdateState::AwaitingRemoteRevokeToAnnounce
    }))
} else {
    None
};
8839 let mut announcement_sigs = None;
8840 let mut target_closing_feerate_sats_per_kw = None;
8841 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
8842 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
8843 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
8844 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
8845 // only, so we default to that if none was written.
8846 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
8847 let mut channel_creation_height = Some(serialized_height);
8848 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
8850 // If we read an old Channel, for simplicity we just treat it as "we never sent an
8851 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
8852 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
8853 let mut latest_inbound_scid_alias = None;
8854 let mut outbound_scid_alias = None;
8855 let mut channel_pending_event_emitted = None;
8856 let mut channel_ready_event_emitted = None;
8858 let mut user_id_high_opt: Option<u64> = None;
8859 let mut channel_keys_id: Option<[u8; 32]> = None;
8860 let mut temporary_channel_id: Option<ChannelId> = None;
8861 let mut holder_max_accepted_htlcs: Option<u16> = None;
8863 let mut blocked_monitor_updates = Some(Vec::new());
8865 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
8866 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
8868 let mut is_batch_funding: Option<()> = None;
8870 let mut local_initiated_shutdown: Option<()> = None;
8872 let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
8873 let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
8875 let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
8877 read_tlv_fields!(reader, {
8878 (0, announcement_sigs, option),
8879 (1, minimum_depth, option),
8880 (2, channel_type, option),
8881 (3, counterparty_selected_channel_reserve_satoshis, option),
8882 (4, holder_selected_channel_reserve_satoshis, option),
8883 (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
8884 (6, holder_max_htlc_value_in_flight_msat, option),
8885 (7, shutdown_scriptpubkey, option),
8886 (8, blocked_monitor_updates, optional_vec),
8887 (9, target_closing_feerate_sats_per_kw, option),
8888 (11, monitor_pending_finalized_fulfills, optional_vec),
8889 (13, channel_creation_height, option),
8890 (15, preimages_opt, optional_vec),
8891 (17, announcement_sigs_state, option),
8892 (19, latest_inbound_scid_alias, option),
8893 (21, outbound_scid_alias, option),
8894 (23, channel_ready_event_emitted, option),
8895 (25, user_id_high_opt, option),
8896 (27, channel_keys_id, option),
8897 (28, holder_max_accepted_htlcs, option),
8898 (29, temporary_channel_id, option),
8899 (31, channel_pending_event_emitted, option),
8900 (35, pending_outbound_skimmed_fees_opt, optional_vec),
8901 (37, holding_cell_skimmed_fees_opt, optional_vec),
8902 (38, is_batch_funding, option),
8903 (39, pending_outbound_blinding_points_opt, optional_vec),
8904 (41, holding_cell_blinding_points_opt, optional_vec),
8905 (43, malformed_htlcs, optional_vec), // Added in 0.0.119
8906 (45, local_initiated_shutdown, option),
let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
    let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
    // If we've gotten to the funding stage of the channel, populate the signer with its
    // required channel parameters.
    if channel_state >= ChannelState::FundingNegotiated {
        holder_signer.provide_channel_parameters(&channel_parameters);
    }
    (channel_keys_id, holder_signer)
} else {
    // `keys_data` can be `None` if we had corrupted data.
    let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
    let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
    (holder_signer.channel_keys_id(), holder_signer)
};
if let Some(preimages) = preimages_opt {
    let mut iter = preimages.into_iter();
    for htlc in pending_outbound_htlcs.iter_mut() {
        match htlc.state {
            OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
                htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
            }
            OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
                htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
            }
            _ => {}
        }
    }
    // We expect all preimages to be consumed above
    if iter.next().is_some() {
        return Err(DecodeError::InvalidValue);
    }
}
let chan_features = channel_type.as_ref().unwrap();
if !chan_features.is_subset(our_supported_features) {
    // If the channel was written by a new version and negotiated with features we don't
    // understand yet, refuse to read it.
    return Err(DecodeError::UnknownRequiredFeature);
}
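// Sketch: if, say, a channel serialized with an anchors channel type is read by a
// node whose `our_supported_features` lacks that feature bit, `is_subset` returns
// false and we bail with `UnknownRequiredFeature` instead of guessing at the
// commitment format.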
8950 // ChannelTransactionParameters may have had an empty features set upon deserialization.
8951 // To account for that, we're proactively setting/overriding the field here.
8952 channel_parameters.channel_type_features = chan_features.clone();
8954 let mut secp_ctx = Secp256k1::new();
8955 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
8957 // `user_id` used to be a single u64 value. In order to remain backwards
8958 // compatible with versions prior to 0.0.113, the u128 is serialized as two
8959 // separate u64 values.
8960 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
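// e.g. `user_id_low = 42`, `user_id_high_opt = Some(7)` reassembles to
// `(7u128 << 64) | 42`, the exact value split apart on the write side.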
8962 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
    let mut iter = skimmed_fees.into_iter();
    for htlc in pending_outbound_htlcs.iter_mut() {
        htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
    }
    // We expect all skimmed fees to be consumed above
    if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
}
if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
    let mut iter = skimmed_fees.into_iter();
    for htlc in holding_cell_htlc_updates.iter_mut() {
        if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
            *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
        }
    }
    // We expect all skimmed fees to be consumed above
    if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
}
if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
    let mut iter = blinding_pts.into_iter();
    for htlc in pending_outbound_htlcs.iter_mut() {
        htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
    }
    // We expect all blinding points to be consumed above
    if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
}
if let Some(blinding_pts) = holding_cell_blinding_points_opt {
    let mut iter = blinding_pts.into_iter();
    for htlc in holding_cell_htlc_updates.iter_mut() {
        if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
            *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
        }
    }
    // We expect all blinding points to be consumed above
    if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
}
if let Some(malformed_htlcs) = malformed_htlcs {
    for (malformed_htlc_id, failure_code, sha256_of_onion) in malformed_htlcs {
        let htlc_idx = holding_cell_htlc_updates.iter().position(|htlc| {
            if let HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet } = htlc {
                let matches = *htlc_id == malformed_htlc_id;
                if matches { debug_assert!(err_packet.data.is_empty()) }
                matches
            } else { false }
        }).ok_or(DecodeError::InvalidValue)?;
        let malformed_htlc = HTLCUpdateAwaitingACK::FailMalformedHTLC {
            htlc_id: malformed_htlc_id, failure_code, sha256_of_onion
        };
        let _ = core::mem::replace(&mut holding_cell_htlc_updates[htlc_idx], malformed_htlc);
    }
}
Ok(Channel {
    context: ChannelContext {
9021 config: config.unwrap(),
9025 // Note that we don't care about serializing handshake limits as we only ever serialize
9026 // channel data after the handshake has completed.
9027 inbound_handshake_limits_override: None,
9030 temporary_channel_id,
9032 announcement_sigs_state: announcement_sigs_state.unwrap(),
9034 channel_value_satoshis,
9036 latest_monitor_update_id,
9038 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
9039 shutdown_scriptpubkey,
9042 cur_holder_commitment_transaction_number,
9043 cur_counterparty_commitment_transaction_number,
9046 holder_max_accepted_htlcs,
9047 pending_inbound_htlcs,
9048 pending_outbound_htlcs,
9049 holding_cell_htlc_updates,
9053 monitor_pending_channel_ready,
9054 monitor_pending_revoke_and_ack,
9055 monitor_pending_commitment_signed,
9056 monitor_pending_forwards,
9057 monitor_pending_failures,
9058 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
9060 signer_pending_commitment_update: false,
9061 signer_pending_funding: false,
9064 holding_cell_update_fee,
9065 next_holder_htlc_id,
9066 next_counterparty_htlc_id,
9067 update_time_counter,
9070 #[cfg(debug_assertions)]
9071 holder_max_commitment_tx_output: Mutex::new((0, 0)),
9072 #[cfg(debug_assertions)]
9073 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
9075 last_sent_closing_fee: None,
9076 pending_counterparty_closing_signed: None,
9077 expecting_peer_commitment_signed: false,
9078 closing_fee_limits: None,
9079 target_closing_feerate_sats_per_kw,
9081 funding_tx_confirmed_in,
9082 funding_tx_confirmation_height,
9084 channel_creation_height: channel_creation_height.unwrap(),
9086 counterparty_dust_limit_satoshis,
9087 holder_dust_limit_satoshis,
9088 counterparty_max_htlc_value_in_flight_msat,
9089 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
9090 counterparty_selected_channel_reserve_satoshis,
9091 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
9092 counterparty_htlc_minimum_msat,
9093 holder_htlc_minimum_msat,
9094 counterparty_max_accepted_htlcs,
9097 counterparty_forwarding_info,
9099 channel_transaction_parameters: channel_parameters,
9100 funding_transaction,
9103 counterparty_cur_commitment_point,
9104 counterparty_prev_commitment_point,
9105 counterparty_node_id,
9107 counterparty_shutdown_scriptpubkey,
9111 channel_update_status,
9112 closing_signed_in_flight: false,
9116 #[cfg(any(test, fuzzing))]
9117 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
9118 #[cfg(any(test, fuzzing))]
9119 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
9121 workaround_lnd_bug_4006: None,
9122 sent_message_awaiting_response: None,
9124 latest_inbound_scid_alias,
// Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if they're missing
9126 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
9128 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
9129 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
9131 #[cfg(any(test, fuzzing))]
9132 historical_inbound_htlc_fulfills,
9134 channel_type: channel_type.unwrap(),
9137 local_initiated_shutdown,
9139 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
9141 #[cfg(dual_funding)]
9142 dual_funding_channel_context: None,
#[cfg(test)]
mod tests {
use bitcoin::blockdata::constants::ChainHash;
9151 use bitcoin::blockdata::script::{ScriptBuf, Builder};
9152 use bitcoin::blockdata::transaction::{Transaction, TxOut};
9153 use bitcoin::blockdata::opcodes;
9154 use bitcoin::network::constants::Network;
9155 use crate::ln::onion_utils::INVALID_ONION_BLINDING;
9156 use crate::ln::{PaymentHash, PaymentPreimage};
9157 use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
9158 use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
9159 use crate::ln::channel::InitFeatures;
9160 use crate::ln::channel::{AwaitingChannelReadyFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
9161 use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
9162 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
9163 use crate::ln::msgs;
9164 use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
9165 use crate::ln::script::ShutdownScript;
9166 use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
9167 use crate::chain::BestBlock;
9168 use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
9169 use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
9170 use crate::chain::transaction::OutPoint;
9171 use crate::routing::router::{Path, RouteHop};
9172 use crate::util::config::UserConfig;
9173 use crate::util::errors::APIError;
9174 use crate::util::ser::{ReadableArgs, Writeable};
9175 use crate::util::test_utils;
9176 use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
9177 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
9178 use bitcoin::secp256k1::ffi::Signature as FFISignature;
9179 use bitcoin::secp256k1::{SecretKey,PublicKey};
9180 use bitcoin::hashes::sha256::Hash as Sha256;
9181 use bitcoin::hashes::Hash;
9182 use bitcoin::hashes::hex::FromHex;
9183 use bitcoin::hash_types::WPubkeyHash;
9184 use bitcoin::blockdata::locktime::absolute::LockTime;
9185 use bitcoin::address::{WitnessProgram, WitnessVersion};
9186 use crate::prelude::*;
#[test]
fn test_channel_state_order() {
9190 use crate::ln::channel::NegotiatingFundingFlags;
9191 use crate::ln::channel::AwaitingChannelReadyFlags;
9192 use crate::ln::channel::ChannelReadyFlags;
9194 assert!(ChannelState::NegotiatingFunding(NegotiatingFundingFlags::new()) < ChannelState::FundingNegotiated);
9195 assert!(ChannelState::FundingNegotiated < ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()));
9196 assert!(ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()) < ChannelState::ChannelReady(ChannelReadyFlags::new()));
9197 assert!(ChannelState::ChannelReady(ChannelReadyFlags::new()) < ChannelState::ShutdownComplete);
struct TestFeeEstimator {
    fee_est: u32
}
impl FeeEstimator for TestFeeEstimator {
    fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 { self.fee_est }
}
#[test]
fn test_max_funding_satoshis_no_wumbo() {
9211 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
9212 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
9213 "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
struct Keys {
    signer: InMemorySigner,
}
impl EntropySource for Keys {
    fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
}
impl SignerProvider for Keys {
    type EcdsaSigner = InMemorySigner;
    #[cfg(taproot)]
    type TaprootSigner = InMemorySigner;
    fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
        self.signer.channel_keys_id()
    }
    fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
        self.signer.clone()
    }
    fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
9239 fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
9240 let secp_ctx = Secp256k1::signing_only();
9241 let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
9242 let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
        Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
    }

    fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
        let secp_ctx = Secp256k1::signing_only();
        let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
        Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
    }
}
9253 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
9254 fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
}
#[test]
fn upfront_shutdown_script_incompatibility() {
9260 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
    &WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
).unwrap();
9265 let seed = [42; 32];
9266 let network = Network::Testnet;
9267 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9268 keys_provider.expect(OnGetShutdownScriptpubkey {
    returns: non_v0_segwit_shutdown_script.clone(),
});
9272 let secp_ctx = Secp256k1::new();
9273 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9274 let config = UserConfig::default();
9275 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
    Err(APIError::IncompatibleShutdownScript { script }) => {
        assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
    },
    Err(e) => panic!("Unexpected error: {:?}", e),
    Ok(_) => panic!("Expected error"),
}
}
9284 // Check that, during channel creation, we use the same feerate in the open channel message
9285 // as we do in the Channel object creation itself.
#[test]
fn test_open_channel_msg_fee() {
9288 let original_fee = 253;
9289 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
9290 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
9291 let secp_ctx = Secp256k1::new();
9292 let seed = [42; 32];
9293 let network = Network::Testnet;
9294 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9296 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9297 let config = UserConfig::default();
9298 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9300 // Now change the fee so we can check that the fee in the open_channel message is the
9301 // same as the old fee.
9302 fee_est.fee_est = 500;
9303 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9304 assert_eq!(open_channel_msg.common_fields.commitment_feerate_sat_per_1000_weight, original_fee);
#[test]
fn test_holder_vs_counterparty_dust_limit() {
9309 // Test that when calculating the local and remote commitment transaction fees, the correct
9310 // dust limits are used.
9311 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9312 let secp_ctx = Secp256k1::new();
9313 let seed = [42; 32];
9314 let network = Network::Testnet;
9315 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9316 let logger = test_utils::TestLogger::new();
9317 let best_block = BestBlock::from_network(network);
9319 // Go through the flow of opening a channel between two nodes, making sure
9320 // they have different dust limits.
9322 // Create Node A's channel pointing to Node B's pubkey
9323 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9324 let config = UserConfig::default();
9325 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9327 // Create Node B's channel by receiving Node A's open_channel message
9328 // Make sure A's dust limit is as we expect.
9329 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9330 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9331 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
9333 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
9334 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
9335 accept_channel_msg.common_fields.dust_limit_satoshis = 546;
9336 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
9337 node_a_chan.context.holder_dust_limit_satoshis = 1560;
9339 // Node A --> Node B: funding created
9340 let output_script = node_a_chan.context.get_funding_redeemscript();
9341 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
    value: 10000000, script_pubkey: output_script.clone(),
}]};
9344 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9345 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
9346 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
9348 // Node B --> Node A: funding signed
9349 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
9350 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
9352 // Put some inbound and outbound HTLCs in A's channel.
9353 let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
    htlc_id: 0,
    amount_msat: htlc_amount_msat,
    payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
    cltv_expiry: 300000000,
    state: InboundHTLCState::Committed,
});
node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
    htlc_id: 1,
    amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
    payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
    cltv_expiry: 200000000,
    state: OutboundHTLCState::Committed,
    source: HTLCSource::OutboundRoute {
        path: Path { hops: Vec::new(), blinded_tail: None },
        session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
        first_hop_htlc_msat: 548,
        payment_id: PaymentId([42; 32]),
    },
    skimmed_fee_msat: None,
    blinding_point: None,
});
9378 // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
9379 // the dust limit check.
9380 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
9381 let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
9382 let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
9383 assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
9385 // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
9386 // of the HTLCs are seen to be above the dust limit.
9387 node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
9388 let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
9389 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
9390 let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
9391 assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
#[test]
fn test_timeout_vs_success_htlc_dust_limit() {
9396 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
9397 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
9398 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
9399 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
9400 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
9401 let secp_ctx = Secp256k1::new();
9402 let seed = [42; 32];
9403 let network = Network::Testnet;
9404 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9406 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9407 let config = UserConfig::default();
9408 let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9410 let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
9411 let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
9413 // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
9414 // counted as dust when it shouldn't be.
9415 let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
9416 let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
9417 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
9418 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
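// Worked numbers (editorial sketch, assuming the pre-anchors BOLT 3 HTLC-timeout
// weight of 663 and this channel's 354 sat dust floor): at 253 sat/kW the timeout
// tx costs 253 * 663 / 1000 = 167 sat, so the non-dust threshold for an offered
// HTLC is 354 + 167 = 521 sat, and the amount above is (521 + 1) * 1000 msat.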
9420 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
9421 let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
9422 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
9423 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
9424 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
9426 chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
9428 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
9429 let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
9430 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
9431 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
9432 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
9434 // If swapped: this HTLC would be counted as dust when it shouldn't be.
9435 let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
9436 let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
9437 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
9438 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
#[test]
fn channel_reestablish_no_updates() {
9443 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9444 let logger = test_utils::TestLogger::new();
9445 let secp_ctx = Secp256k1::new();
9446 let seed = [42; 32];
9447 let network = Network::Testnet;
9448 let best_block = BestBlock::from_network(network);
9449 let chain_hash = ChainHash::using_genesis_block(network);
9450 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9452 // Go through the flow of opening a channel between two nodes.
9454 // Create Node A's channel pointing to Node B's pubkey
9455 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9456 let config = UserConfig::default();
9457 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9459 // Create Node B's channel by receiving Node A's open_channel message
9460 let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
9461 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9462 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
9464 // Node B --> Node A: accept channel
9465 let accept_channel_msg = node_b_chan.accept_inbound_channel();
9466 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
9468 // Node A --> Node B: funding created
9469 let output_script = node_a_chan.context.get_funding_redeemscript();
9470 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
    value: 10000000, script_pubkey: output_script.clone(),
}]};
9473 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9474 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
9475 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
9477 // Node B --> Node A: funding signed
9478 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
9479 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
9481 // Now disconnect the two nodes and check that the commitment point in
9482 // Node B's channel_reestablish message is sane.
9483 assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
9484 let msg = node_b_chan.get_channel_reestablish(&&logger);
9485 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
9486 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
9487 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
// Check that the commitment point in Node A's channel_reestablish message
// is sane.
assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
9492 let msg = node_a_chan.get_channel_reestablish(&&logger);
9493 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
9494 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
9495 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
#[test]
fn test_configured_holder_max_htlc_value_in_flight() {
9500 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9501 let logger = test_utils::TestLogger::new();
9502 let secp_ctx = Secp256k1::new();
9503 let seed = [42; 32];
9504 let network = Network::Testnet;
9505 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9506 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9507 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9509 let mut config_2_percent = UserConfig::default();
9510 config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
9511 let mut config_99_percent = UserConfig::default();
9512 config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
9513 let mut config_0_percent = UserConfig::default();
9514 config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
9515 let mut config_101_percent = UserConfig::default();
9516 config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
9518 // Test that `OutboundV1Channel::new` creates a channel with the correct value for
9519 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
9520 // which is set to the lower bound + 1 (2%) of the `channel_value`.
9521 let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
9522 let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
9523 assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
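// i.e. 10_000_000 sat * 1000 = 10_000_000_000 msat, of which 2% is
// 200_000_000 msat.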
9525 // Test with the upper bound - 1 of valid values (99%).
9526 let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
9527 let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
9528 assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
9530 let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
// Test that `InboundV1Channel::new` creates a channel with the correct value for
// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
// which is set to the lower bound + 1 (2%) of the `channel_value`.
9535 let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9536 let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
9537 assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
9539 // Test with the upper bound - 1 of valid values (99%).
9540 let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9541 let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
9542 assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
9544 // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
9545 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
9546 let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
9547 let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
9548 assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
// Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
// than 100.
9553 let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
9554 let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
9555 assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
9557 // Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
9558 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
9559 let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9560 let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
9561 assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
// Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
// than 100.
9566 let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9567 let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
9568 assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
#[test]
fn test_configured_holder_selected_channel_reserve_satoshis() {
9574 // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
9575 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
9576 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
9578 // Test with valid but unreasonably high channel reserves
9579 // Requesting and accepting parties have requested for 49%-49% and 60%-30% channel reserve
9580 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
9581 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
9583 // Test with calculated channel reserve less than lower bound
9584 // i.e `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
9585 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
// Test with invalid channel reserves since the sum of both is greater than or equal
// to the channel value
9589 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
9590 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
	fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
		let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());

		let mut outbound_node_config = UserConfig::default();
		outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
		let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();

		let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
		assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);

		let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
		let mut inbound_node_config = UserConfig::default();
		inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;

		if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
			let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();

			let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);

			assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
			assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
		} else {
			// Channel Negotiations failed
			let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
			assert!(result.is_err());
		}
	}
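
	// Basic `channel_update` handling: open and fund a channel between two nodes, then check
	// that a counterparty `channel_update` updates the stored forwarding info exactly once.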
	#[test]
	fn channel_update() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = ChainHash::using_genesis_block(network);
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		// Create Node A's channel pointing to Node B's pubkey
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

		// Create Node B's channel by receiving Node A's open_channel message
		// Make sure A's dust limit is as we expect.
		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

		// Node B --> Node A: accept channel, explicitly setting B's dust limit.
		let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
		accept_channel_msg.common_fields.dust_limit_satoshis = 546;
		node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
		node_a_chan.context.holder_dust_limit_satoshis = 1560;

		// Node A --> Node B: funding created
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
			value: 10000000, script_pubkey: output_script.clone(),
		}]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
		let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

		// Node B --> Node A: funding signed
		let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
		let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
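
		// With the channel funded on both sides, hand node A a `channel_update` ostensibly from
		// node B and check that the forwarding parameters it carries are recorded.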
		// Make sure that receiving a channel update will update the Channel as expected.
		let update = ChannelUpdate {
			contents: UnsignedChannelUpdate {
				chain_hash,
				short_channel_id: 0,
				timestamp: 0,
				flags: 0,
				cltv_expiry_delta: 100,
				htlc_minimum_msat: 5,
				htlc_maximum_msat: MAX_VALUE_MSAT,
				fee_base_msat: 110,
				fee_proportional_millionths: 11,
				excess_data: Vec::new(),
			},
			signature: Signature::from(unsafe { FFISignature::new() })
		};
		assert!(node_a_chan.channel_update(&update).unwrap());

		// The counterparty can send an update with a higher minimum HTLC, but that shouldn't
		// change our official htlc_minimum_msat.
		assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
		match node_a_chan.context.counterparty_forwarding_info() {
			Some(info) => {
				assert_eq!(info.cltv_expiry_delta, 100);
				assert_eq!(info.fee_base_msat, 110);
				assert_eq!(info.fee_proportional_millionths, 11);
			},
			None => panic!("expected counterparty forwarding info to be Some")
		}

		assert!(!node_a_chan.channel_update(&update).unwrap());
	}
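
	// Round-trip serialization of channels carrying blinding points, skimmed fees, and
	// malformed-HTLC holding-cell entries.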
	#[test]
	fn blinding_point_skimmed_fee_malformed_ser() {
		// Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized
		// properly.
		let logger = test_utils::TestLogger::new();
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let features = channelmanager::provided_init_features(&config);
		let mut outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(
			&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None
		).unwrap();
		let inbound_chan = InboundV1Channel::<&TestKeysInterface>::new(
			&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config),
			&features, &outbound_chan.get_open_channel(ChainHash::using_genesis_block(network)), 7, &config, 0, &&logger, false
		).unwrap();
		outbound_chan.accept_channel(&inbound_chan.get_accept_channel_message(), &config.channel_handshake_limits, &features).unwrap();
		let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
			value: 10000000, script_pubkey: outbound_chan.context.get_funding_redeemscript(),
		}]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let funding_created = outbound_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap().unwrap();
		let mut chan = match inbound_chan.funding_created(&funding_created, best_block, &&keys_provider, &&logger) {
			Ok((chan, _, _)) => chan,
			Err((_, e)) => panic!("{}", e),
		};

		let dummy_htlc_source = HTLCSource::OutboundRoute {
			path: Path {
				hops: vec![RouteHop {
					pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
					node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
					cltv_expiry_delta: 0, maybe_announced_channel: false,
				}],
				blinded_tail: None
			},
			session_priv: test_utils::privkey(42),
			first_hop_htlc_msat: 0,
			payment_id: PaymentId([42; 32]),
		};
		let dummy_outbound_output = OutboundHTLCOutput {
			htlc_id: 0,
			amount_msat: 0,
			payment_hash: PaymentHash([43; 32]),
			cltv_expiry: 0,
			state: OutboundHTLCState::Committed,
			source: dummy_htlc_source.clone(),
			skimmed_fee_msat: None,
			blinding_point: None,
		};
		let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
		for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
			if idx % 2 == 0 {
				htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
			}
			if idx % 3 == 0 {
				htlc.skimmed_fee_msat = Some(1);
			}
		}
		chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();
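
		// Fill the holding cell with every `HTLCUpdateAwaitingACK` variant so the round-trip
		// below covers adds (with and without blinding points and skimmed fees), claims, fails,
		// and malformed fails.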
		let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
			amount_msat: 0,
			cltv_expiry: 0,
			payment_hash: PaymentHash([43; 32]),
			source: dummy_htlc_source.clone(),
			onion_routing_packet: msgs::OnionPacket {
				version: 0,
				public_key: Ok(test_utils::pubkey(1)),
				hop_data: [0; 20*65],
				hmac: [0; 32],
			},
			skimmed_fee_msat: None,
			blinding_point: None,
		};
		let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
			payment_preimage: PaymentPreimage([42; 32]),
			htlc_id: 0,
		};
		let dummy_holding_cell_failed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailHTLC {
			htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }
		};
		let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC {
			htlc_id, failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32],
		};
		let mut holding_cell_htlc_updates = Vec::with_capacity(12);
		for i in 0..12 {
			if i % 5 == 0 {
				holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
			} else if i % 5 == 1 {
				holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
			} else if i % 5 == 2 {
				let mut dummy_add = dummy_holding_cell_add_htlc.clone();
				if let HTLCUpdateAwaitingACK::AddHTLC {
					ref mut blinding_point, ref mut skimmed_fee_msat, ..
				} = &mut dummy_add {
					*blinding_point = Some(test_utils::pubkey(42 + i));
					*skimmed_fee_msat = Some(42);
				} else { panic!() }
				holding_cell_htlc_updates.push(dummy_add);
			} else if i % 5 == 3 {
				holding_cell_htlc_updates.push(dummy_holding_cell_malformed_htlc(i as u64));
			} else {
				holding_cell_htlc_updates.push(dummy_holding_cell_failed_htlc(i as u64));
			}
		}
		chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();

		// Encode and decode the channel and ensure that the HTLCs within are the same.
		let encoded_chan = chan.encode();
		let mut s = crate::io::Cursor::new(&encoded_chan);
		let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
		let features = channelmanager::provided_channel_type_features(&config);
		let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
		assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
		assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
	}
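
	// The test below replays the BOLT 3 commitment- and HTLC-transaction test vectors, checking
	// that our commitment construction and both parties' signatures match the expected raw
	// transactions byte for byte.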
	#[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
	#[test]
	fn outbound_commitment_test() {
		use bitcoin::sighash;
		use bitcoin::consensus::encode::serialize;
		use bitcoin::sighash::EcdsaSighashType;
		use bitcoin::hashes::hex::FromHex;
		use bitcoin::hash_types::Txid;
		use bitcoin::secp256k1::Message;
		use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
		use crate::ln::PaymentPreimage;
		use crate::ln::channel::{HTLCOutputInCommitment, TxCreationKeys};
		use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
		use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
		use crate::util::logger::Logger;
		use crate::sync::Arc;
		use core::str::FromStr;
		use hex::DisplayHex;

		// Test vectors from BOLT 3 Appendices C and F (anchors):
		let feeest = TestFeeEstimator{fee_est: 15000};
		let logger : Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());
		let secp_ctx = Secp256k1::new();
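
		// The holder's channel secrets below are fixed constants from the BOLT 3 test-vector
		// setup rather than freshly generated keys, so the resulting signatures are
		// deterministic and comparable against the appendix.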
		let mut signer = InMemorySigner::new(
			&secp_ctx,
			SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),

			// These aren't set in the test vectors:
			[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
			10_000_000,
			[0; 32],
			[0; 32],
		);

		assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
				<Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
		let keys_provider = Keys { signer: signer.clone() };

		let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let mut config = UserConfig::default();
		config.channel_handshake_config.announced_channel = false;
		let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
		chan.context.holder_dust_limit_satoshis = 546;
		chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel

		let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };

		let counterparty_pubkeys = ChannelPublicKeys {
			funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
			revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
			payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
			delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
			htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
		};
		chan.context.channel_transaction_parameters.counterparty_parameters = Some(
			CounterpartyChannelTransactionParameters {
				pubkeys: counterparty_pubkeys.clone(),
				selected_contest_delay: 144
			});
		chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
		signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);

		assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
				<Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);

		assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
				<Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);

		assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
				<Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);

		// We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
		// derived from a commitment_seed, so instead we copy it here and call
		// build_commitment_transaction.
		let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
		let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
		let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
		let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
		let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);

		macro_rules! test_commitment {
			( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
				chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
				test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
			};
		}

		macro_rules! test_commitment_with_anchors {
			( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
				chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
				test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
			};
		}

		macro_rules! test_commitment_common {
			( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
				$( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
			} ) => { {
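				// Builds the holder commitment for the current channel state, verifies the
				// counterparty's commitment signature and our own, then signs and verifies each
				// HTLC transaction, comparing all fully signed transactions against the
				// expected hex.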
				let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
					let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);

					let htlcs = commitment_stats.htlcs_included.drain(..)
						.filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
						.collect();
					(commitment_stats.tx, htlcs)
				};
				let trusted_tx = commitment_tx.trust();
				let unsigned_tx = trusted_tx.built_transaction();
				let redeemscript = chan.context.get_funding_redeemscript();
				let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
				let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
				log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
				assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");

				let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
				per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
				let mut counterparty_htlc_sigs = Vec::new();
				counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
				$({
					let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
					per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
					counterparty_htlc_sigs.push(remote_signature);
				})*
				assert_eq!(htlcs.len(), per_htlc.len());

				let holder_commitment_tx = HolderCommitmentTransaction::new(
					commitment_tx.clone(),
					counterparty_signature,
					counterparty_htlc_sigs,
					&chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
					chan.context.counterparty_funding_pubkey()
				);
				let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
				assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");

				let funding_redeemscript = chan.context.get_funding_redeemscript();
				let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
				assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");

				// ((htlc, counterparty_sig), (index, holder_sig))
				let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();

				$({
					log_trace!(logger, "verifying htlc {}", $htlc_idx);
					let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();

					let ref htlc = htlcs[$htlc_idx];
					let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
						chan.context.get_counterparty_selected_contest_delay().unwrap(),
						&htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
					let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
					let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
					let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
					assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");

					let mut preimage: Option<PaymentPreimage> = None;
					if !htlc.offered {
						for i in 0..5 {
							let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
							if out == htlc.payment_hash {
								preimage = Some(PaymentPreimage([i; 32]));
							}
						}

						assert!(preimage.is_some());
					}

					let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
					let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
						channel_derivation_parameters: ChannelDerivationParameters {
							value_satoshis: chan.context.channel_value_satoshis,
							keys_id: chan.context.channel_keys_id,
							transaction_parameters: chan.context.channel_transaction_parameters.clone(),
						},
						commitment_txid: trusted_tx.txid(),
						per_commitment_number: trusted_tx.commitment_number(),
						per_commitment_point: trusted_tx.per_commitment_point(),
						feerate_per_kw: trusted_tx.feerate_per_kw(),
						htlc: htlc.clone(),
						preimage: preimage.clone(),
						counterparty_sig: *htlc_counterparty_sig,
					}, &secp_ctx).unwrap();
					let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
					assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");

					let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
					assert_eq!(signature, htlc_holder_sig, "htlc sig");
					let trusted_tx = holder_commitment_tx.trust();
					htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
					log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
					assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
				})*
				assert!(htlc_counterparty_sig_iter.next().is_none());
			} }
		}
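
		// Each invocation below supplies the counterparty's commitment signature, our expected
		// commitment signature, and the expected signed commitment transaction hex, followed by
		// one { index, counterparty_htlc_sig, holder_htlc_sig, htlc_tx_hex } tuple per
		// untrimmed HTLC.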

		// anchors: simple commitment tx with no HTLCs and single anchor
		test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
		                 "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
		                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

		// simple commitment tx with no HTLCs
		chan.context.value_to_self_msat = 7000000000;

		test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
		                 "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
		                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

		// anchors: simple commitment tx with no HTLCs
		test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
		                 "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
		                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

		chan.context.pending_inbound_htlcs.push({
			let mut out = InboundHTLCOutput{
				htlc_id: 0,
				amount_msat: 1000000,
				cltv_expiry: 500,
				payment_hash: PaymentHash([0; 32]),
				state: InboundHTLCState::Committed,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_inbound_htlcs.push({
			let mut out = InboundHTLCOutput{
				htlc_id: 1,
				amount_msat: 2000000,
				cltv_expiry: 501,
				payment_hash: PaymentHash([0; 32]),
				state: InboundHTLCState::Committed,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_outbound_htlcs.push({
			let mut out = OutboundHTLCOutput{
				htlc_id: 2,
				amount_msat: 2000000,
				cltv_expiry: 502,
				payment_hash: PaymentHash([0; 32]),
				state: OutboundHTLCState::Committed,
				source: HTLCSource::dummy(),
				skimmed_fee_msat: None,
				blinding_point: None,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_outbound_htlcs.push({
			let mut out = OutboundHTLCOutput{
				htlc_id: 3,
				amount_msat: 3000000,
				cltv_expiry: 503,
				payment_hash: PaymentHash([0; 32]),
				state: OutboundHTLCState::Committed,
				source: HTLCSource::dummy(),
				skimmed_fee_msat: None,
				blinding_point: None,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_inbound_htlcs.push({
			let mut out = InboundHTLCOutput{
				htlc_id: 4,
				amount_msat: 4000000,
				cltv_expiry: 504,
				payment_hash: PaymentHash([0; 32]),
				state: InboundHTLCState::Committed,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
			out
		});
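
		// Per the BOLT 3 vector setup above: received HTLCs of 1_000_000, 2_000_000 and
		// 4_000_000 msat (payment hashes derived from the 0x00…, 0x01… and 0x04… preimages) and
		// offered HTLCs of 2_000_000 and 3_000_000 msat (0x02… and 0x03… preimages).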

		// commitment tx with all five HTLCs untrimmed (minimum feerate)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 0;

		test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
		                 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
		                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		                  { 0,
		                  "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
		                  "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
		                  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },

		                  { 1,
		                  "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
		                  "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
		                  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

		                  { 2,
		                  "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
		                  "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
		                  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

		                  { 3,
		                  "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
		                  "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
		                  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

		                  { 4,
		                  "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
		                  "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
		                  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
		} );

		// commitment tx with seven outputs untrimmed (maximum feerate)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 647;

		test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
		                 "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
		                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		                  { 0,
		                  "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
		                  "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
		                  "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },

		                  { 1,
		                  "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
		                  "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
		                  "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

		                  { 2,
		                  "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
		                  "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
		                  "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

		                  { 3,
		                  "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
		                  "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
		                  "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

		                  { 4,
		                  "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
		                  "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
		                  "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
		} );

		// commitment tx with six outputs untrimmed (minimum feerate)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 648;

		test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
		                 "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
		                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		                  { 0,
		                  "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
		                  "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
		                  "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

		                  { 1,
		                  "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
		                  "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
		                  "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

		                  { 2,
		                  "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
		                  "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
		                  "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

		                  { 3,
		                  "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
		                  "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
		                  "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
		} );

		// anchors: commitment tx with six outputs untrimmed (minimum dust limit)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 645;
		chan.context.holder_dust_limit_satoshis = 1001;

		test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
		                 "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
		                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		                  { 0,
		                  "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
		                  "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
		                  "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },

		                  { 1,
		                  "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
		                  "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
		                  "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },

		                  { 2,
		                  "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
		                  "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
		                  "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },

		                  { 3,
		                  "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
		                  "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
		                  "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
		} );
10234 // commitment tx with six outputs untrimmed (maximum feerate)
10235 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10236 chan.context.feerate_per_kw = 2069;
10237 chan.context.holder_dust_limit_satoshis = 546;
10239 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
10240 "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
10241 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10244 "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
10245 "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
10246 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10249 "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
10250 "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
10251 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
10254 "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
10255 "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
10256 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10259 "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
10260 "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
10261 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
});

// commitment tx with five outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2070;
10268 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
10269 "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
10270 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10273 "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
10274 "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
10275 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10278 "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
10279 "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
10280 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10283 "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
10284 "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
10285 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
});

// commitment tx with five outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2194;
10292 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
10293 "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
10294 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10297 "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
10298 "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
10299 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10302 "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
10303 "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
10304 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10307 "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
10308 "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
10309 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
});

// commitment tx with four outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2195;
10316 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
10317 "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
10318 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10321 "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
10322 "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
10323 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10326 "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
10327 "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
10328 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
});

// anchors: commitment tx with four outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2185;
chan.context.holder_dust_limit_satoshis = 2001;
let cached_channel_type = chan.context.channel_type.clone();
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10338 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
10339 "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
10340 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10343 "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
10344 "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
10345 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
10348 "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
10349 "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
10350 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
});

// commitment tx with four outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3702;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();
10359 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
10360 "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
10361 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10364 "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
10365 "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
10366 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10369 "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
10370 "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
10371 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
});

// commitment tx with three outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3703;
10378 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
10379 "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
10380 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10383 "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
10384 "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
10385 "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
});

// anchors: commitment tx with three outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3687;
chan.context.holder_dust_limit_satoshis = 3001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10394 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
10395 "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
10396 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10399 "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
10400 "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
10401 "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
});

// commitment tx with three outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4914;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();
10410 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
10411 "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
10412 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10415 "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
10416 "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
10417 "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
});

// commitment tx with two outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4915;
chan.context.holder_dust_limit_satoshis = 546;
10425 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
10426 "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
10427 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// anchors: commitment tx with two outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4894;
chan.context.holder_dust_limit_satoshis = 4001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10435 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
10436 "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
10437 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// commitment tx with two outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 9651180;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();
10445 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
10446 "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
10447 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// commitment tx with one output untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 9651181;
10453 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
10454 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
10455 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// anchors: commitment tx with one output untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 6216010;
chan.context.holder_dust_limit_satoshis = 4001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10463 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
10464 "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
10465 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// commitment tx with fee greater than funder amount
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 9651936;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type;
10473 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
10474 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
10475 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
chan.context.feerate_per_kw = 253;
chan.context.pending_inbound_htlcs.clear();
chan.context.pending_inbound_htlcs.push({
	let mut out = InboundHTLCOutput{
		htlc_id: 1,
		amount_msat: 2000000,
		cltv_expiry: 501,
		payment_hash: PaymentHash([0; 32]),
		state: InboundHTLCState::Committed,
	};
	out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
	out
});
chan.context.pending_outbound_htlcs.clear();
chan.context.pending_outbound_htlcs.push({
	let mut out = OutboundHTLCOutput{
		htlc_id: 6,
		amount_msat: 5000001,
		cltv_expiry: 506,
		payment_hash: PaymentHash([0; 32]),
		state: OutboundHTLCState::Committed,
		source: HTLCSource::dummy(),
		skimmed_fee_msat: None,
		blinding_point: None,
	};
	out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
	out
});
chan.context.pending_outbound_htlcs.push({
	let mut out = OutboundHTLCOutput{
		htlc_id: 5,
		amount_msat: 5000000,
		cltv_expiry: 505,
		payment_hash: PaymentHash([0; 32]),
		state: OutboundHTLCState::Committed,
		source: HTLCSource::dummy(),
		skimmed_fee_msat: None,
		blinding_point: None,
	};
	out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
	out
});
10522 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
10523 "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
10524 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10527 "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
10528 "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
10529 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
10531 "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
10532 "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
10533 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
10535 "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
10536 "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
10537 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
});

chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10541 test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
10542 "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
10543 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10546 "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
10547 "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
10548 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
10550 "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
10551 "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
10552 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
10554 "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
10555 "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
10556 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
});
}

#[test]
fn test_per_commitment_secret_gen() {
	// Test vectors from BOLT 3 Appendix D:

	let mut seed = [0; 32];
	seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
	assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
	           <Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);

	seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
	assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
	           <Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);

	assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
	           <Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);

	assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
	           <Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);

	seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
	assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
	           <Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
}
#[test]
fn test_key_derivation() {
	// Test vectors from BOLT 3 Appendix E:
	let secp_ctx = Secp256k1::new();

	let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
	let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();

	let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
	assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);

	let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
	assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);

	assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
	           SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());

	assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
	           <Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);

	assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
	           SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
}
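// `derive_private_key` and the basepoint helpers checked above all apply the same BOLT 3
// tweak: add SHA256(per_commitment_point || basepoint) to the base key. A minimal sketch
// of the private-key case (illustrative only; the real logic lives in `chan_utils` and
// `ln::channel_keys`):
#[allow(dead_code)]
fn derive_private_key_sketch<C: secp256k1::Signing>(
	secp_ctx: &Secp256k1<C>, per_commitment_point: &PublicKey, base_secret: &SecretKey,
) -> SecretKey {
	use bitcoin::hashes::HashEngine;
	// tweak = SHA256(per_commitment_point || basepoint)
	let mut sha = Sha256::engine();
	sha.input(&per_commitment_point.serialize());
	sha.input(&PublicKey::from_secret_key(secp_ctx, base_secret).serialize());
	let tweak = Sha256::from_engine(sha).to_byte_array();
	// derived_secret = base_secret + tweak (mod n)
	base_secret.clone()
		.add_tweak(&secp256k1::Scalar::from_be_bytes(tweak).expect("hash is a valid scalar with overwhelming probability"))
		.expect("tweaked key is valid with overwhelming probability")
}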
#[test]
fn test_zero_conf_channel_type_support() {
	let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
	let logger = test_utils::TestLogger::new();

	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let config = UserConfig::default();
	let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
		node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

	let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
	channel_type_features.set_zero_conf_required();
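
	// Inject the zero-conf channel type into the open_channel message; the
	// channel-level constructor should accept it even with is_0conf=false, as the
	// decision to actually trust an unconfirmed funding transaction is made by the
	// caller at a higher layer.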
	let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
	open_channel_msg.common_fields.channel_type = Some(channel_type_features);

	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
	let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
		node_b_node_id, &channelmanager::provided_channel_type_features(&config),
		&channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
	assert!(res.is_ok());
}
#[test]
fn test_supports_anchors_zero_htlc_tx_fee() {
	// Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
	// resulting `channel_type`.
	let secp_ctx = Secp256k1::new();
	let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
	let logger = test_utils::TestLogger::new();

	let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
	let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

	let mut config = UserConfig::default();
	config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
	// It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`;
	// both sides need to signal it.
	let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
		&channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
		&config, 0, 42, None
	).unwrap();
	assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());
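
	// With only the opener opting in, negotiation falls back to the default
	// `static_remote_key`-only channel type, as asserted above.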
	let mut expected_channel_type = ChannelTypeFeatures::empty();
	expected_channel_type.set_static_remote_key_required();
	expected_channel_type.set_anchors_zero_fee_htlc_tx_required();

	let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
		&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
		None
	).unwrap();

	let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
	let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
		&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
		&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
	).unwrap();

	assert_eq!(channel_a.context.channel_type, expected_channel_type);
	assert_eq!(channel_b.context.channel_type, expected_channel_type);
}
#[test]
fn test_rejects_implicit_simple_anchors() {
	// Tests that if `option_anchors` is being negotiated implicitly through the intersection of
	// each side's `InitFeatures`, it is rejected.
	let secp_ctx = Secp256k1::new();
	let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
	let logger = test_utils::TestLogger::new();

	let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
	let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

	let config = UserConfig::default();

	// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
	let static_remote_key_required: u64 = 1 << 12;
	let simple_anchors_required: u64 = 1 << 20;
	let raw_init_features = static_remote_key_required | simple_anchors_required;
	let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
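
	// Per BOLT 9, even feature bits are "required": bit 12 is `option_static_remotekey`
	// (required) and bit 20 is the legacy `option_anchors` (required), so the init
	// features built above advertise the original anchors variant, which LDK does
	// not support.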
	let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
		&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
		None
	).unwrap();

	// Set `channel_type` to `None` to force the implicit feature negotiation.
	let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
	open_channel_msg.common_fields.channel_type = None;

	// Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
	// `static_remote_key`, it will fail the channel.
	let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
		&channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
		&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
	);
	assert!(channel_b.is_err());
}
#[test]
fn test_rejects_simple_anchors_channel_type() {
	// Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
	// it is rejected.
	let secp_ctx = Secp256k1::new();
	let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
	let logger = test_utils::TestLogger::new();

	let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
	let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

	let config = UserConfig::default();

	// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
	let static_remote_key_required: u64 = 1 << 12;
	let simple_anchors_required: u64 = 1 << 20;
	let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
	let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
	let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
	assert!(!simple_anchors_init.requires_unknown_bits());
	assert!(!simple_anchors_channel_type.requires_unknown_bits());
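
	// Sanity check: LDK recognizes both feature bits, so the rejections below are
	// policy decisions about the legacy anchors type rather than unknown-feature
	// failures.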
	// First, we'll try to open a channel between A and B where A requests a channel type for
	// the original `option_anchors` feature (non-zero-fee HTLC transactions). This should be
	// rejected by B as it's not supported by LDK.
	let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
		&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
		None
	).unwrap();

	let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
	open_channel_msg.common_fields.channel_type = Some(simple_anchors_channel_type.clone());

	let res = InboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
		&channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
		&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
	);
	assert!(res.is_err());
	// Then, we'll try to open another channel where A requests a channel type for
	// `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
	// original `option_anchors` feature, which should be rejected by A as it's not supported by
	// LDK.
	let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
		10000000, 100000, 42, &config, 0, 42, None
	).unwrap();

	let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));

	let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
		&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
		&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
	).unwrap();

	let mut accept_channel_msg = channel_b.get_accept_channel_message();
	accept_channel_msg.common_fields.channel_type = Some(simple_anchors_channel_type.clone());

	let res = channel_a.accept_channel(
		&accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
	);
	assert!(res.is_err());
}
#[test]
fn test_waiting_for_batch() {
	let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
	let logger = test_utils::TestLogger::new();
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let best_block = BestBlock::from_network(network);
	let chain_hash = ChainHash::using_genesis_block(network);
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

	let mut config = UserConfig::default();
	// Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
	// channel in a batch before all channels are ready.
	config.channel_handshake_limits.trust_own_funding_0conf = true;
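
	// In batch funding, several channels share a single funding transaction, so no
	// individual channel may broadcast it (or send channel_ready) until every
	// channel in the batch has completed its signing handshake.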
	// Create a channel from node a to node b that will be part of batch funding.
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
		&feeest,
		&&keys_provider,
		&&keys_provider,
		node_b_node_id,
		&channelmanager::provided_init_features(&config),
		10000000,
		100000,
		42,
		&config,
		0,
		42,
		None
	).unwrap();

	let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
	let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
		&feeest,
		&&keys_provider,
		&&keys_provider,
		node_b_node_id,
		&channelmanager::provided_channel_type_features(&config),
		&channelmanager::provided_init_features(&config),
		&open_channel_msg,
		7,
		&config,
		0,
		&&logger,
		true, // Allow node b to send a 0conf channel_ready.
	).unwrap();

	let accept_channel_msg = node_b_chan.accept_inbound_channel();
	node_a_chan.accept_channel(
		&accept_channel_msg,
		&config.channel_handshake_limits,
		&channelmanager::provided_init_features(&config),
	).unwrap();
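
	// The transaction below has two outputs, emulating a batch that funds a second
	// channel alongside this one (the second output's empty script stands in for
	// the other channel's funding output).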
	// Fund the channel with a batch funding transaction.
	let output_script = node_a_chan.context.get_funding_redeemscript();
	let tx = Transaction {
		version: 1,
		lock_time: LockTime::ZERO,
		input: Vec::new(),
		output: vec![
			TxOut {
				value: 10000000, script_pubkey: output_script.clone(),
			},
			TxOut {
				value: 10000000, script_pubkey: Builder::new().into_script(),
			},
		]};
	let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
	let funding_created_msg = node_a_chan.get_funding_created(
		tx.clone(), funding_outpoint, true, &&logger,
	).map_err(|_| ()).unwrap();
	let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
		&funding_created_msg.unwrap(),
		best_block,
		&&keys_provider,
		&&logger,
	).map_err(|_| ()).unwrap();
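
	// monitor_updating_restored simulates completion of the pending ChannelMonitor
	// update, returning the messages (e.g. channel_ready, funding_broadcastable)
	// the channel is prepared to release at that point.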
	let node_b_updates = node_b_chan.monitor_updating_restored(
		&&logger,
		&&keys_provider,
		chain_hash,
		&config,
		0,
	);

	// Receive funding_signed, but the channel will be configured to hold sending channel_ready and
	// broadcasting the funding transaction until the batch is ready.
	let res = node_a_chan.funding_signed(
		&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger,
	);
	let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
	let node_a_updates = node_a_chan.monitor_updating_restored(
		&&logger,
		&&keys_provider,
		chain_hash,
		&config,
		0,
	);

	// Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
	// as the funding transaction depends on all channels in the batch becoming ready.
	assert!(node_a_updates.channel_ready.is_none());
	assert!(node_a_updates.funding_broadcastable.is_none());
	assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
	// It is possible to receive a 0conf channel_ready from the remote node.
	node_a_chan.channel_ready(
		&node_b_updates.channel_ready.unwrap(),
		&&keys_provider,
		chain_hash,
		&config,
		&best_block,
		&&logger,
	).unwrap();
	assert_eq!(
		node_a_chan.context.channel_state,
		ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH | AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)
	);

	// The WAITING_FOR_BATCH flag is only cleared when the ChannelManager, having seen
	// the whole batch complete, calls set_batch_ready.
	node_a_chan.set_batch_ready();
	assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
	assert!(node_a_chan.check_get_channel_ready(0).is_some());
}