// This file is Copyright its original authors, visible in version control
// history.
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::sighash::EcdsaSighashType;
use bitcoin::consensus::encode;

use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256d;
use bitcoin::hash_types::{Txid, BlockHash};

use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
use bitcoin::secp256k1::{PublicKey,SecretKey};
use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
use bitcoin::secp256k1;

use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
use crate::ln::msgs::DecodeError;
use crate::ln::script::{self, ShutdownScript};
use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
use crate::ln::chan_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
use crate::events::ClosureReason;
use crate::routing::gossip::NodeId;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use crate::util::logger::{Logger, Record, WithContext};
use crate::util::errors::APIError;
use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
use crate::util::scid_utils::scid_from_parts;

use crate::prelude::*;
use core::{cmp,mem,fmt};
use core::convert::TryInto;

#[cfg(any(test, fuzzing, debug_assertions))]
use crate::sync::Mutex;
use crate::sign::type_resolver::ChannelSignerType;

use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};
pub struct ChannelValueStat {
	pub value_to_self_msat: u64,
	pub channel_value_msat: u64,
	pub channel_reserve_msat: u64,
	pub pending_outbound_htlcs_amount_msat: u64,
	pub pending_inbound_htlcs_amount_msat: u64,
	pub holding_cell_outbound_amount_msat: u64,
	pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
	pub counterparty_dust_limit_msat: u64,
}

pub struct AvailableBalances {
	/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
	pub balance_msat: u64,
	/// Total amount available for our counterparty to send to us.
	pub inbound_capacity_msat: u64,
	/// Total amount available for us to send to our counterparty.
	pub outbound_capacity_msat: u64,
	/// The maximum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_limit_msat: u64,
	/// The minimum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_minimum_msat: u64,
}
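
// Editor's note: an illustrative sketch (not part of the original file) of how a caller might
// use `AvailableBalances` to decide whether a payment of `amount_msat` fits on this channel.
// The helper name is hypothetical.
#[cfg(test)]
fn example_can_send_payment(balances: &AvailableBalances, amount_msat: u64) -> bool {
	// The next outbound HTLC must respect both the per-HTLC minimum and the current limit,
	// which already accounts for reserves, pending HTLCs and commitment transaction fees.
	amount_msat >= balances.next_outbound_htlc_minimum_msat
		&& amount_msat <= balances.next_outbound_htlc_limit_msat
}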
#[derive(Debug, Clone, Copy, PartialEq)]
	// Inbound states mirroring InboundHTLCState
	AwaitingRemoteRevokeToAnnounce,
	// Note that we do not have an AwaitingAnnouncedRemoteRevoke variant here as it is universally
	// handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
	// distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
	// the fee update anywhere, we can simply consider the fee update `Committed` immediately
	// instead of setting it to AwaitingAnnouncedRemoteRevoke.
	// Outbound state can only be `LocalAnnounced` or `Committed`

enum InboundHTLCRemovalReason {
	FailRelay(msgs::OnionErrorPacket),
	FailMalformed(([u8; 32], u16)),
	Fulfill(PaymentPreimage),
enum InboundHTLCState {
	/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
	/// update_add_htlc message for this HTLC.
	RemoteAnnounced(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've
	/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
	/// state (see the example below). We have not yet included this HTLC in a
	/// commitment_signed message because we are waiting on the remote's
	/// aforementioned state revocation. One reason this missing remote RAA
	/// (revoke_and_ack) blocks us from constructing a commitment_signed message
	/// is because every time we create a new "state", i.e. every time we sign a
	/// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
	/// which are provided one-at-a-time in each RAA. E.g., the last RAA they
	/// sent provided the per_commitment_point for our current commitment tx.
	/// The other reason we should not send a commitment_signed without their RAA
	/// is because their RAA serves to ACK our previous commitment_signed.
	///
	/// Here's an example of how an HTLC could come to be in this state:
	/// remote --> update_add_htlc(prev_htlc) --> local
	/// remote --> commitment_signed(prev_htlc) --> local
	/// remote <-- revoke_and_ack <-- local
	/// remote <-- commitment_signed(prev_htlc) <-- local
	/// [note that here, the remote does not respond with a RAA]
	/// remote --> update_add_htlc(this_htlc) --> local
	/// remote --> commitment_signed(prev_htlc, this_htlc) --> local
	/// Now `this_htlc` will be assigned this state. It's unable to be officially
	/// accepted, i.e. included in a commitment_signed, because we're missing the
	/// RAA that provides our next per_commitment_point. The per_commitment_point
	/// is used to derive commitment keys, which are used to construct the
	/// signatures in a commitment_signed message.
	/// Implies AwaitingRemoteRevoke.
	///
	/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
	AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
	/// We have also included this HTLC in our latest commitment_signed and are now just waiting
	/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
	/// channel (before it can then get forwarded and/or removed).
	/// Implies AwaitingRemoteRevoke.
	AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
	Committed,
	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we'll drop it.
	///
	/// Note that we have to keep an eye on the HTLC until we've received a broadcastable
	/// commitment transaction without it as otherwise we'll have to force-close the channel to
	/// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
	/// anyway). That said, ChannelMonitor does this for us (see
	/// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
	/// our own local state before then, once we're sure that the next commitment_signed and
	/// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
	LocalRemoved(InboundHTLCRemovalReason),
/// Exposes the state of pending inbound HTLCs.
///
/// At a high level, an HTLC being forwarded from one Lightning node to another Lightning node goes
/// through the following states in the state machine:
/// - Announced for addition by the originating node through the update_add_htlc message.
/// - Added to the commitment transaction of the receiving node and originating node in turn
///   through the exchange of commitment_signed and revoke_and_ack messages.
/// - Announced for resolution (fulfillment or failure) by the receiving node through either one of
///   the update_fulfill_htlc, update_fail_htlc, and update_fail_malformed_htlc messages.
/// - Removed from the commitment transaction of the originating node and receiving node in turn
///   through the exchange of commitment_signed and revoke_and_ack messages.
///
/// This can be used to inspect what next message an HTLC is waiting for to advance its state.
#[derive(Clone, Debug, PartialEq)]
pub enum InboundHTLCStateDetails {
	/// We have added this HTLC in our commitment transaction by receiving commitment_signed and
	/// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
	/// before this HTLC is included on the remote commitment transaction.
	AwaitingRemoteRevokeToAdd,
	/// This HTLC has been included in the commitment_signed and revoke_and_ack messages on both sides
	/// and is included in both commitment transactions.
	///
	/// This HTLC is now safe to either forward or be claimed as a payment by us. The HTLC will
	/// remain in this state until the forwarded upstream HTLC has been resolved and we resolve this
	/// HTLC correspondingly, or until we claim it as a payment. If it is part of a multipart
	/// payment, it will only be claimed together with other required parts.
	Committed,
	/// We have received the preimage for this HTLC and it is being removed by fulfilling it with
	/// update_fulfill_htlc. This HTLC is still on both commitment transactions, but we are awaiting
	/// the appropriate revoke_and_ack's from the remote before this HTLC is removed from the remote
	/// commitment transaction after update_fulfill_htlc.
	AwaitingRemoteRevokeToRemoveFulfill,
	/// The HTLC is being removed by failing it with update_fail_htlc or update_fail_malformed_htlc.
	/// This HTLC is still on both commitment transactions, but we are awaiting the appropriate
	/// revoke_and_ack's from the remote before this HTLC is removed from the remote commitment
	/// transaction.
	AwaitingRemoteRevokeToRemoveFail,
impl From<&InboundHTLCState> for Option<InboundHTLCStateDetails> {
	fn from(state: &InboundHTLCState) -> Option<InboundHTLCStateDetails> {
			InboundHTLCState::RemoteAnnounced(_) => None,
			InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
			InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
			InboundHTLCState::Committed =>
				Some(InboundHTLCStateDetails::Committed),
			InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(_)) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail),
			InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(_)) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail),
			InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill),

impl_writeable_tlv_based_enum_upgradable!(InboundHTLCStateDetails,
	(0, AwaitingRemoteRevokeToAdd) => {},
	(2, Committed) => {},
	(4, AwaitingRemoteRevokeToRemoveFulfill) => {},
	(6, AwaitingRemoteRevokeToRemoveFail) => {};
);
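
// Editor's note: a small illustrative check (not part of the original file) of the mapping
// above: an HTLC that is still `RemoteAnnounced` is not exposed at all, while a `Committed`
// HTLC maps to `InboundHTLCStateDetails::Committed`.
#[cfg(test)]
fn example_inbound_state_mapping() {
	let details: Option<InboundHTLCStateDetails> = (&InboundHTLCState::Committed).into();
	assert_eq!(details, Some(InboundHTLCStateDetails::Committed));
}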
struct InboundHTLCOutput {
	payment_hash: PaymentHash,
	state: InboundHTLCState,
/// Exposes details around pending inbound HTLCs.
#[derive(Clone, Debug, PartialEq)]
pub struct InboundHTLCDetails {
	/// The IDs are incremented by 1 starting from 0 for each offered HTLC.
	/// They are unique per channel and inbound/outbound direction, unless an HTLC was only announced
	/// and not part of any commitment transaction.
	pub htlc_id: u64,
	/// The amount in msat.
	pub amount_msat: u64,
	/// The block height at which this HTLC expires.
	pub cltv_expiry: u32,
	/// The payment hash.
	pub payment_hash: PaymentHash,
	/// The state of the HTLC in the state machine.
	///
	/// Determines on which commitment transactions the HTLC is included and what message the HTLC is
	/// waiting for to advance to the next state.
	///
	/// See [`InboundHTLCStateDetails`] for information on the specific states.
	///
	/// LDK will always fill this field in, but when downgrading to prior versions of LDK, new
	/// states may result in `None` here.
	pub state: Option<InboundHTLCStateDetails>,
	/// Whether the HTLC has an output below the local dust limit. If so, the output will be trimmed
	/// from the local commitment transaction and added to the commitment transaction fee.
	/// For non-anchor channels, this takes into account the cost of the second-stage HTLC
	/// transactions as well.
	///
	/// When the local commitment transaction is broadcasted as part of a unilateral closure,
	/// the value of this HTLC will therefore not be claimable but instead burned as a transaction
	/// fee.
	///
	/// Note that dust limits are specific to each party. An HTLC can be dust for the local
	/// commitment transaction but not for the counterparty's commitment transaction and vice versa.
	pub is_dust: bool,
impl_writeable_tlv_based!(InboundHTLCDetails, {
	(0, htlc_id, required),
	(2, amount_msat, required),
	(4, cltv_expiry, required),
	(6, payment_hash, required),
	(7, state, upgradable_option),
	(8, is_dust, required),
});
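
// Editor's note: an illustrative sketch (not part of the original file) of the dust notion the
// `is_dust` field above describes. For an inbound HTLC on our own commitment transaction the
// second-stage transaction is an HTLC-success transaction, so on non-anchor channels its fee is
// added to our dust limit before comparing against the HTLC value. Parameter names are
// hypothetical and this is a simplification of the real accounting.
#[cfg(test)]
fn example_inbound_htlc_is_dust(
	features: &ChannelTypeFeatures, feerate_per_kw: u32, holder_dust_limit_satoshis: u64,
	htlc_amount_msat: u64,
) -> bool {
	let second_stage_fee_sat = if features.supports_anchors_zero_fee_htlc_tx() {
		0
	} else {
		htlc_success_tx_weight(features) * feerate_per_kw as u64 / 1000
	};
	htlc_amount_msat / 1000 < holder_dust_limit_satoshis + second_stage_fee_sat
}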
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum OutboundHTLCState {
	/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we will promote to Committed (note that they may not accept it until the next time we
	/// revoke, but we don't really care about that:
	///  * they've revoked, so worst case we can announce an old state and get our (option on)
	///    money back (though we won't), and,
	///  * we'll send them a revoke when they send a commitment_signed, and since only they're
	///    allowed to remove it, the "can only be removed once committed on both sides" requirement
	///    doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
	///    we'll never get out of sync).
	/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
	/// OutboundHTLCOutput's size just for a temporary bit
	LocalAnnounced(Box<msgs::OnionPacket>),
	Committed,
	/// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
	/// the change (though they'll need to revoke before we fail the payment).
	RemoteRemoved(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
	/// remote revoke_and_ack on a previous state before we can do so.
	AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
	/// revoke_and_ack to drop completely.
	AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
/// Exposes the state of pending outbound HTLCs.
///
/// At a high level, an HTLC being forwarded from one Lightning node to another Lightning node goes
/// through the following states in the state machine:
/// - Announced for addition by the originating node through the update_add_htlc message.
/// - Added to the commitment transaction of the receiving node and originating node in turn
///   through the exchange of commitment_signed and revoke_and_ack messages.
/// - Announced for resolution (fulfillment or failure) by the receiving node through either one of
///   the update_fulfill_htlc, update_fail_htlc, and update_fail_malformed_htlc messages.
/// - Removed from the commitment transaction of the originating node and receiving node in turn
///   through the exchange of commitment_signed and revoke_and_ack messages.
///
/// This can be used to inspect what next message an HTLC is waiting for to advance its state.
#[derive(Clone, Debug, PartialEq)]
pub enum OutboundHTLCStateDetails {
	/// We are awaiting the appropriate revoke_and_ack's from the remote before the HTLC is added
	/// on the remote's commitment transaction after update_add_htlc.
	AwaitingRemoteRevokeToAdd,
	/// The HTLC has been added to the remote's commitment transaction by sending commitment_signed
	/// and receiving revoke_and_ack in return.
	///
	/// The HTLC will remain in this state until the remote node resolves the HTLC, or until we
	/// unilaterally close the channel due to a timeout with an uncooperative remote node.
	Committed,
	/// The HTLC has been fulfilled successfully by the remote with a preimage in update_fulfill_htlc,
	/// and we removed the HTLC from our commitment transaction by receiving commitment_signed and
	/// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
	/// for the removal from its commitment transaction.
	AwaitingRemoteRevokeToRemoveSuccess,
	/// The HTLC has been failed by the remote with update_fail_htlc or update_fail_malformed_htlc,
	/// and we removed the HTLC from our commitment transaction by receiving commitment_signed and
	/// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
	/// for the removal from its commitment transaction.
	AwaitingRemoteRevokeToRemoveFailure,
impl From<&OutboundHTLCState> for OutboundHTLCStateDetails {
	fn from(state: &OutboundHTLCState) -> OutboundHTLCStateDetails {
			OutboundHTLCState::LocalAnnounced(_) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToAdd,
			OutboundHTLCState::Committed =>
				OutboundHTLCStateDetails::Committed,
			// RemoteRemoved states are ignored as the state is transient and the remote has not committed to
			// the removal yet.
			OutboundHTLCState::RemoteRemoved(_) =>
				OutboundHTLCStateDetails::Committed,
			OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess,
			OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Failure(_)) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFailure,
			OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess,
			OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Failure(_)) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFailure,

impl_writeable_tlv_based_enum_upgradable!(OutboundHTLCStateDetails,
	(0, AwaitingRemoteRevokeToAdd) => {},
	(2, Committed) => {},
	(4, AwaitingRemoteRevokeToRemoveSuccess) => {},
	(6, AwaitingRemoteRevokeToRemoveFailure) => {};
#[cfg_attr(test, derive(Debug, PartialEq))]
enum OutboundHTLCOutcome {
	/// LDK version 0.0.105+ will always fill in the preimage here.
	Success(Option<PaymentPreimage>),
	Failure(HTLCFailReason),

impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
	fn from(o: Option<HTLCFailReason>) -> Self {
			None => OutboundHTLCOutcome::Success(None),
			Some(r) => OutboundHTLCOutcome::Failure(r)

impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
	fn into(self) -> Option<&'a HTLCFailReason> {
			OutboundHTLCOutcome::Success(_) => None,
			OutboundHTLCOutcome::Failure(ref r) => Some(r)

#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
struct OutboundHTLCOutput {
	payment_hash: PaymentHash,
	state: OutboundHTLCState,
	blinding_point: Option<PublicKey>,
	skimmed_fee_msat: Option<u64>,
/// Exposes details around pending outbound HTLCs.
#[derive(Clone, Debug, PartialEq)]
pub struct OutboundHTLCDetails {
	/// The IDs are incremented by 1 starting from 0 for each offered HTLC.
	/// They are unique per channel and inbound/outbound direction, unless an HTLC was only announced
	/// and not part of any commitment transaction.
	///
	/// Not present when we are awaiting a remote revocation and the HTLC is not added yet.
	pub htlc_id: Option<u64>,
	/// The amount in msat.
	pub amount_msat: u64,
	/// The block height at which this HTLC expires.
	pub cltv_expiry: u32,
	/// The payment hash.
	pub payment_hash: PaymentHash,
	/// The state of the HTLC in the state machine.
	///
	/// Determines on which commitment transactions the HTLC is included and what message the HTLC is
	/// waiting for to advance to the next state.
	///
	/// See [`OutboundHTLCStateDetails`] for information on the specific states.
	///
	/// LDK will always fill this field in, but when downgrading to prior versions of LDK, new
	/// states may result in `None` here.
	pub state: Option<OutboundHTLCStateDetails>,
	/// The extra fee being skimmed off the top of this HTLC.
	pub skimmed_fee_msat: Option<u64>,
	/// Whether the HTLC has an output below the local dust limit. If so, the output will be trimmed
	/// from the local commitment transaction and added to the commitment transaction fee.
	/// For non-anchor channels, this takes into account the cost of the second-stage HTLC
	/// transactions as well.
	///
	/// When the local commitment transaction is broadcasted as part of a unilateral closure,
	/// the value of this HTLC will therefore not be claimable but instead burned as a transaction
	/// fee.
	///
	/// Note that dust limits are specific to each party. An HTLC can be dust for the local
	/// commitment transaction but not for the counterparty's commitment transaction and vice versa.
	pub is_dust: bool,
impl_writeable_tlv_based!(OutboundHTLCDetails, {
	(0, htlc_id, required),
	(2, amount_msat, required),
	(4, cltv_expiry, required),
	(6, payment_hash, required),
	(7, state, upgradable_option),
	(8, skimmed_fee_msat, required),
	(10, is_dust, required),

/// See AwaitingRemoteRevoke ChannelState for more info
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum HTLCUpdateAwaitingACK {
	AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
		payment_hash: PaymentHash,
		onion_routing_packet: msgs::OnionPacket,
		// The extra fee we're skimming off the top of this HTLC.
		skimmed_fee_msat: Option<u64>,
		blinding_point: Option<PublicKey>,

		payment_preimage: PaymentPreimage,

		err_packet: msgs::OnionErrorPacket,

		sha256_of_onion: [u8; 32],
macro_rules! define_state_flags {
	($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr, $get: ident, $set: ident, $clear: ident)),+], $extra_flags: expr) => {
		#[doc = $flag_type_doc]
		#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
		struct $flag_type(u32);

			const $flag: $flag_type = $flag_type($value);

			/// All flags that apply to the specified [`ChannelState`] variant.
			const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags);

			fn new() -> Self { Self(0) }

			fn from_u32(flags: u32) -> Result<Self, ()> {
				if flags & !Self::ALL.0 != 0 {
					Ok($flag_type(flags))

			fn is_empty(&self) -> bool { self.0 == 0 }

			fn is_set(&self, flag: Self) -> bool { *self & flag == flag }

			fn set(&mut self, flag: Self) { *self |= flag }

			fn clear(&mut self, flag: Self) -> Self { self.0 &= !flag.0; *self }

			define_state_flags!($flag_type, Self::$flag, $get, $set, $clear);

		impl core::ops::BitOr for $flag_type {
			fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
		impl core::ops::BitOrAssign for $flag_type {
			fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; }
		impl core::ops::BitAnd for $flag_type {
			fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) }
		impl core::ops::BitAndAssign for $flag_type {
			fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; }
	($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
	($flag_type: ident, $flag: expr, $get: ident, $set: ident, $clear: ident) => {
		fn $get(&self) -> bool { self.is_set($flag_type::new() | $flag) }
		fn $set(&mut self) { self.set($flag_type::new() | $flag) }
		fn $clear(&mut self) -> Self { self.clear($flag_type::new() | $flag) }
	($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);

		define_state_flags!($flag_type, FundedStateFlags::PEER_DISCONNECTED,
			is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected);
		define_state_flags!($flag_type, FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS,
			is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress);
		define_state_flags!($flag_type, FundedStateFlags::REMOTE_SHUTDOWN_SENT,
			is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent);
		define_state_flags!($flag_type, FundedStateFlags::LOCAL_SHUTDOWN_SENT,
			is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent);

		impl core::ops::BitOr<FundedStateFlags> for $flag_type {
			fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
		impl core::ops::BitOrAssign<FundedStateFlags> for $flag_type {
			fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; }
		impl core::ops::BitAnd<FundedStateFlags> for $flag_type {
			fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) }
		impl core::ops::BitAndAssign<FundedStateFlags> for $flag_type {
			fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; }
		impl PartialEq<FundedStateFlags> for $flag_type {
			fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 }
		impl From<FundedStateFlags> for $flag_type {
			fn from(flags: FundedStateFlags) -> Self { Self(flags.0) }
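
// Editor's note: an illustrative sketch (not part of the original file) of the API generated by
// `define_state_flags!` above: each flag type is a thin wrapper around a `u32` bitmask with
// associated constants per flag plus `new`/`set`/`clear`/`is_set`/`is_empty` helpers and the
// usual bitwise operators.
#[cfg(test)]
fn example_state_flags_usage() {
	let mut flags = FundedStateFlags::new();
	assert!(flags.is_empty());
	flags.set(FundedStateFlags::PEER_DISCONNECTED);
	assert!(flags.is_set(FundedStateFlags::PEER_DISCONNECTED));
	flags.clear(FundedStateFlags::PEER_DISCONNECTED);
	assert!(flags.is_empty());
}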
/// We declare all the states/flags here together to help determine which bits are still available
	pub const OUR_INIT_SENT: u32 = 1 << 0;
	pub const THEIR_INIT_SENT: u32 = 1 << 1;
	pub const FUNDING_NEGOTIATED: u32 = 1 << 2;
	pub const AWAITING_CHANNEL_READY: u32 = 1 << 3;
	pub const THEIR_CHANNEL_READY: u32 = 1 << 4;
	pub const OUR_CHANNEL_READY: u32 = 1 << 5;
	pub const CHANNEL_READY: u32 = 1 << 6;
	pub const PEER_DISCONNECTED: u32 = 1 << 7;
	pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8;
	pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9;
	pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10;
	pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11;
	pub const SHUTDOWN_COMPLETE: u32 = 1 << 12;
	pub const WAITING_FOR_BATCH: u32 = 1 << 13;
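
// Editor's note: an illustrative sketch (not part of the original file). Each funded state gets
// its own bit and the per-state flags are OR'd into the same `u32`; the `to_u32`/`from_u32`
// helpers on `ChannelState` further below round-trip this encoding.
#[cfg(test)]
fn example_state_bit_layout() {
	let encoded = state_flags::CHANNEL_READY | state_flags::AWAITING_REMOTE_REVOKE;
	assert_eq!(encoded, (1 << 6) | (1 << 9));
	assert_eq!(ChannelState::from_u32(encoded),
		Ok(ChannelState::ChannelReady(ChannelReadyFlags::AWAITING_REMOTE_REVOKE)));
}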
	"Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
	("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
		until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED,
		is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected),
	("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
		somewhere and we should pause sending any outbound messages until they've managed to \
		complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS,
		is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress),
	("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
		any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
		message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT,
		is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent),
	("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
		the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT,
		is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent)

	"Flags that only apply to [`ChannelState::NegotiatingFunding`].",
	NegotiatingFundingFlags, [
		("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
			OUR_INIT_SENT, state_flags::OUR_INIT_SENT, is_our_init_sent, set_our_init_sent, clear_our_init_sent),
		("Indicates we have received their `open_channel`/`accept_channel` message.",
			THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT, is_their_init_sent, set_their_init_sent, clear_their_init_sent)

	"Flags that only apply to [`ChannelState::AwaitingChannelReady`].",
	FUNDED_STATE, AwaitingChannelReadyFlags, [
		("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
			`OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
			THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY,
			is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready),
		("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
			`OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
			OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY,
			is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready),
		("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
			is being held until all channels in the batch have received `funding_signed` and have \
			their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH,
			is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch)

	"Flags that only apply to [`ChannelState::ChannelReady`].",
	FUNDED_STATE, ChannelReadyFlags, [
		("Indicates that we have sent a `commitment_signed` but are awaiting the responding \
			`revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
			messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
			implicit ACK, so instead we have to hold them away temporarily to be sent later.",
			AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE,
			is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke)
// Note that the order of this enum is implicitly defined by where each variant is placed. Take this
// into account when introducing new states and update `test_channel_state_order` accordingly.
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
	/// We are negotiating the parameters required for the channel prior to funding it.
	NegotiatingFunding(NegotiatingFundingFlags),
	/// We have sent `funding_created` and are awaiting a `funding_signed` to advance to
	/// `AwaitingChannelReady`. Note that this is nonsense for an inbound channel as we immediately generate
	/// `funding_signed` upon receipt of `funding_created`, so simply skip this state.
	FundingNegotiated,
	/// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the
	/// funding transaction to confirm.
	AwaitingChannelReady(AwaitingChannelReadyFlags),
	/// Both we and our counterparty consider the funding transaction confirmed and the channel is
	/// now operational.
	ChannelReady(ChannelReadyFlags),
	/// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager`
	/// is about to drop us, but we store this anyway.
	ShutdownComplete,
macro_rules! impl_state_flag {
	($get: ident, $set: ident, $clear: ident, [$($state: ident),+]) => {
		fn $get(&self) -> bool {
				ChannelState::$state(flags) => flags.$get(),
				ChannelState::$state(flags) => flags.$set(),
				_ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
		fn $clear(&mut self) {
				ChannelState::$state(flags) => { let _ = flags.$clear(); },
				_ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
	($get: ident, $set: ident, $clear: ident, FUNDED_STATES) => {
		impl_state_flag!($get, $set, $clear, [AwaitingChannelReady, ChannelReady]);
	($get: ident, $set: ident, $clear: ident, $state: ident) => {
		impl_state_flag!($get, $set, $clear, [$state]);

	fn from_u32(state: u32) -> Result<Self, ()> {
			state_flags::FUNDING_NEGOTIATED => Ok(ChannelState::FundingNegotiated),
			state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete),
				if val & state_flags::AWAITING_CHANNEL_READY == state_flags::AWAITING_CHANNEL_READY {
					AwaitingChannelReadyFlags::from_u32(val & !state_flags::AWAITING_CHANNEL_READY)
						.map(|flags| ChannelState::AwaitingChannelReady(flags))
				} else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY {
					ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY)
						.map(|flags| ChannelState::ChannelReady(flags))
				} else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) {
					Ok(ChannelState::NegotiatingFunding(flags))

	fn to_u32(&self) -> u32 {
			ChannelState::NegotiatingFunding(flags) => flags.0,
			ChannelState::FundingNegotiated => state_flags::FUNDING_NEGOTIATED,
			ChannelState::AwaitingChannelReady(flags) => state_flags::AWAITING_CHANNEL_READY | flags.0,
			ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0,
			ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE,

	fn is_pre_funded_state(&self) -> bool {
		matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingNegotiated)

	fn is_both_sides_shutdown(&self) -> bool {
		self.is_local_shutdown_sent() && self.is_remote_shutdown_sent()

	fn with_funded_state_flags_mask(&self) -> FundedStateFlags {
			ChannelState::AwaitingChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			_ => FundedStateFlags::new(),

	fn can_generate_new_commitment(&self) -> bool {
			ChannelState::ChannelReady(flags) =>
				!flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) &&
					!flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) &&
					!flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
				debug_assert!(false, "Can only generate new commitment within ChannelReady");

	impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected, FUNDED_STATES);
	impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress, FUNDED_STATES);
	impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent, FUNDED_STATES);
	impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent, FUNDED_STATES);
	impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready, AwaitingChannelReady);
	impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready, AwaitingChannelReady);
	impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch, AwaitingChannelReady);
	impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke, ChannelReady);
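
// Editor's note: an illustrative sketch (not part of the original file) of the accessors
// generated by the `impl_state_flag!` calls above: they read and mutate the flags of whichever
// funded `ChannelState` variant the channel is currently in.
#[cfg(test)]
fn example_channel_state_accessors() {
	let mut state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
	assert!(!state.is_our_channel_ready());
	state.set_our_channel_ready();
	assert!(state.is_our_channel_ready());
	// Flags shared by all funded states, like `PEER_DISCONNECTED`, are usable in both
	// `AwaitingChannelReady` and `ChannelReady`.
	state.set_peer_disconnected();
	assert!(state.is_peer_disconnected());
}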
pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

pub const DEFAULT_MAX_HTLCS: u16 = 50;

pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
	const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
	const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
	if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
}

const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;

pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;

pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
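
// Editor's note: an illustrative sketch (not part of the original file) of how the weight
// constants above combine: the commitment transaction fee, in satoshis rounded down, for a
// given feerate and number of non-dust HTLCs. This mirrors the constants' documentation rather
// than quoting any particular helper elsewhere in this file.
#[cfg(test)]
fn example_commit_tx_fee_sat(
	channel_type_features: &ChannelTypeFeatures, feerate_per_kw: u32, num_nondust_htlcs: u64,
) -> u64 {
	let weight = commitment_tx_base_weight(channel_type_features)
		+ num_nondust_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC;
	// `feerate_per_kw` is expressed in satoshis per 1000 weight units.
	feerate_per_kw as u64 * weight / 1000
}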
/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;

/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;

/// Total bitcoin supply in satoshis.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;

/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;

/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;
/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;

/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
pub(super) enum ChannelError {

impl fmt::Debug for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
			&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
			&ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
			&ChannelError::Close(ref e) => write!(f, "Close : {}", e),

impl fmt::Display for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
			&ChannelError::Ignore(ref e) => write!(f, "{}", e),
			&ChannelError::Warn(ref e) => write!(f, "{}", e),
			&ChannelError::Close(ref e) => write!(f, "{}", e),

pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
	pub peer_id: Option<PublicKey>,
	pub channel_id: Option<ChannelId>,

impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
	fn log(&self, mut record: Record) {
		record.peer_id = self.peer_id;
		record.channel_id = self.channel_id;
		self.logger.log(record)

impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
where L::Target: Logger {
	pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
	where S::Target: SignerProvider
			peer_id: Some(context.counterparty_node_id),
			channel_id: Some(context.channel_id),

macro_rules! secp_check {
	($res: expr, $err: expr) => {
			Err(_) => return Err(ChannelError::Close($err)),
/// The "channel disabled" bit in channel_update must be set based on whether we are connected to
/// our counterparty or not. However, we don't want to announce updates right away to avoid
/// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
/// our channel_update message and track the current state here.
/// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
#[derive(Clone, Copy, PartialEq)]
pub(super) enum ChannelUpdateStatus {
	/// We've announced the channel as enabled and are connected to our peer.
	/// Our channel is no longer live, but we haven't announced the channel as disabled yet.
	/// Our channel is live again, but we haven't announced the channel as enabled yet.
	/// We've announced the channel as disabled.

/// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
pub enum AnnouncementSigsState {
	/// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
	/// we sent the last `AnnouncementSignatures`.
	/// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
	/// This state never appears on disk - instead we write `NotSent`.
	/// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
	/// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
	/// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
	/// they send back a `RevokeAndACK`.
	/// This state never appears on disk - instead we write `NotSent`.
	/// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
	/// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.

/// An enum indicating whether the local or remote side offered a given HTLC.
/// A struct gathering stats on pending HTLCs, on either the inbound or outbound side.
	pending_htlcs_value_msat: u64,
	on_counterparty_tx_dust_exposure_msat: u64,
	on_holder_tx_dust_exposure_msat: u64,
	holding_cell_msat: u64,
	on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included

/// A struct gathering stats on a commitment transaction, either local or remote.
struct CommitmentStats<'a> {
	tx: CommitmentTransaction, // the transaction info
	feerate_per_kw: u32, // the feerate included to build the transaction
	total_fee_sat: u64, // the total fee included in the transaction
	num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
	htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
	local_balance_msat: u64, // local balance before fees *not* considering dust limits
	remote_balance_msat: u64, // remote balance before fees *not* considering dust limits
	outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
	inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
/// Used when calculating whether we or the remote can afford an additional HTLC.
struct HTLCCandidate {
	origin: HTLCInitiator,

impl HTLCCandidate {
	fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
/// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
/// a description.
enum UpdateFulfillFetch {
		monitor_update: ChannelMonitorUpdate,
		htlc_value_msat: u64,
		msg: Option<msgs::UpdateFulfillHTLC>,

/// The return type of get_update_fulfill_htlc_and_commit.
pub enum UpdateFulfillCommitFetch {
	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
	/// previously placed in the holding cell (and has since been removed).
		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
		monitor_update: ChannelMonitorUpdate,
		/// The value of the HTLC which was claimed, in msat.
		htlc_value_msat: u64,
	/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
	/// or has been forgotten (presumably previously claimed).
/// The return value of `monitor_updating_restored`
pub(super) struct MonitorRestoreUpdates {
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
	pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	pub finalized_claimed_htlcs: Vec<HTLCSource>,
	pub funding_broadcastable: Option<Transaction>,
	pub channel_ready: Option<msgs::ChannelReady>,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,

/// The return value of `signer_maybe_unblocked`
pub(super) struct SignerResumeUpdates {
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub funding_signed: Option<msgs::FundingSigned>,
	pub channel_ready: Option<msgs::ChannelReady>,

/// The return value of `channel_reestablish`
pub(super) struct ReestablishResponses {
	pub channel_ready: Option<msgs::ChannelReady>,
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
	pub shutdown_msg: Option<msgs::Shutdown>,

/// The result of a shutdown that should be handled.
pub(crate) struct ShutdownResult {
	pub(crate) closure_reason: ClosureReason,
	/// A channel monitor update to apply.
	pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelId, ChannelMonitorUpdate)>,
	/// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
	pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	/// An unbroadcasted batch funding transaction id. The closure of this channel should be
	/// propagated to the remainder of the batch.
	pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
	pub(crate) channel_id: ChannelId,
	pub(crate) user_channel_id: u128,
	pub(crate) channel_capacity_satoshis: u64,
	pub(crate) counterparty_node_id: PublicKey,
	pub(crate) unbroadcasted_funding_tx: Option<Transaction>,
	pub(crate) channel_funding_txo: Option<OutPoint>,
/// If the majority of the channel's funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
/// balance but the fundee is unable to send a payment as the increase in fee more than drains
/// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
/// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
/// by this multiple without hitting this case, before sending.
/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
/// HTLCs for days we may need this to suffice for feerate increases across days, but that may
/// leave the channel less usable as we hold a bigger reserve.
#[cfg(any(fuzzing, test))]
pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
#[cfg(not(any(fuzzing, test)))]
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
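
// Editor's note: an illustrative sketch (not part of the original file) of the buffer described
// above: before sending an HTLC as the channel initiator, the commitment transaction fee is
// evaluated at a feerate scaled by `FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE`, so a moderate
// feerate increase cannot immediately make the channel unusable. Parameter names are
// hypothetical.
#[cfg(test)]
fn example_fee_spike_buffered_fee_sat(
	channel_type_features: &ChannelTypeFeatures, feerate_per_kw: u32, num_nondust_htlcs: u64,
) -> u64 {
	let weight = commitment_tx_base_weight(channel_type_features)
		+ num_nondust_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC;
	feerate_per_kw as u64 * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * weight / 1000
}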
/// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
/// channel creation on an inbound channel, we simply force-close and move on.
/// This constant is the one suggested in BOLT 2.
pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;
/// In case of a concurrent update_add_htlc proposed by our counterparty, we might
/// not have enough balance value remaining to cover the onchain cost of this new
/// HTLC weight. If this happens, our counterparty fails the reception of our
/// commitment_signed including this new HTLC due to infringement on the channel
/// reserve.
/// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
/// size 2. However, if the number of concurrent update_add_htlc is higher, this still
/// leads to a channel force-close. Ultimately, this is an issue coming from the
/// design of LN state machines, allowing asynchronous updates.
pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
/// When a channel is opened, we check that the funding amount is enough to pay for relevant
/// commitment transaction fees, with at least this many HTLCs present on the commitment
/// transaction (not counting the value of the HTLCs themselves).
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;

/// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
/// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
/// ChannelUpdate prompted by the config update. This value was determined as follows:
///
/// * The expected interval between ticks (1 minute).
/// * The average convergence delay of updates across the network, i.e., ~300 seconds on average
///   for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
/// * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
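
// Editor's note: a small illustrative check (not part of the original file) of the derivation
// described above: ~300 seconds of average update propagation divided by the 1-minute tick
// interval yields the 5-tick expiry.
#[cfg(test)]
fn example_expire_prev_config_ticks_derivation() {
	let convergence_delay_secs: usize = 300;
	let tick_interval_secs: usize = 60;
	assert_eq!(EXPIRE_PREV_CONFIG_TICKS, convergence_delay_secs / tick_interval_secs);
}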
/// The number of ticks that may elapse while we're waiting for a response to a
/// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
/// them.
///
/// See [`ChannelContext::sent_message_awaiting_response`] for more information.
1143 /// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
1144 /// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
1145 /// exceeding this age limit will be force-closed and purged from memory.
1146 pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;
1148 /// Number of blocks needed for an output from a coinbase transaction to be spendable.
1149 pub(crate) const COINBASE_MATURITY: u32 = 100;
struct PendingChannelMonitorUpdate {
	update: ChannelMonitorUpdate,

impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
	(0, update, required),

/// The `ChannelPhase` enum describes the current phase in the life of a lightning channel with each of
/// its variants containing an appropriate channel struct.
pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
	UnfundedOutboundV1(OutboundV1Channel<SP>),
	UnfundedInboundV1(InboundV1Channel<SP>),
	Funded(Channel<SP>),

impl<'a, SP: Deref> ChannelPhase<SP> where
	SP::Target: SignerProvider,
	<SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
	pub fn context(&'a self) -> &'a ChannelContext<SP> {
			ChannelPhase::Funded(chan) => &chan.context,
			ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
			ChannelPhase::UnfundedInboundV1(chan) => &chan.context,

	pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
			ChannelPhase::Funded(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
/// Contains all state common to unfunded inbound/outbound channels.
pub(super) struct UnfundedChannelContext {
	/// A counter tracking how many ticks have elapsed since this unfunded channel was
	/// created. If this unfunded channel still exists when the counter reaches
	/// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, it will be force-closed and purged from memory.
	///
	/// This is so that we don't keep channels around that haven't progressed to a funded state
	/// in a timely manner.
	unfunded_channel_age_ticks: usize,
}

impl UnfundedChannelContext {
	/// Determines whether we should force-close and purge this unfunded channel from memory due to it
	/// having reached the unfunded channel age limit.
	///
	/// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
	pub fn should_expire_unfunded_channel(&mut self) -> bool {
		self.unfunded_channel_age_ticks += 1;
		self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
	}
}
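
// Editor's note: an illustrative sketch (not part of the original file) of the intended usage
// described above: the method is called once per timer tick and only starts returning `true`
// once `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS` ticks have elapsed.
#[cfg(test)]
fn example_unfunded_channel_expiry() {
	let mut ctx = UnfundedChannelContext { unfunded_channel_age_ticks: 0 };
	for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
		assert!(!ctx.should_expire_unfunded_channel());
	}
	assert!(ctx.should_expire_unfunded_channel());
}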
/// Contains everything about the channel including state, and various flags.
pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
	config: LegacyChannelConfig,

	// Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
	// constructed using it. The second element in the tuple corresponds to the number of ticks that
	// have elapsed since the update occurred.
	prev_config: Option<(ChannelConfig, usize)>,

	inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,

	/// The current channel ID.
	channel_id: ChannelId,
	/// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
	/// Will be `None` for channels created prior to 0.0.115.
	temporary_channel_id: Option<ChannelId>,
	channel_state: ChannelState,
	// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
	// our peer. However, we want to make sure they received it, or else rebroadcast it when we
	// reconnect.
	// We do so here, see `AnnouncementSigsSent` for more details on the state(s).
	// Note that a number of our tests were written prior to the behavior here which retransmits
	// AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
	// tests.
	#[cfg(any(test, feature = "_test_utils"))]
	pub(crate) announcement_sigs_state: AnnouncementSigsState,
	#[cfg(not(any(test, feature = "_test_utils")))]
	announcement_sigs_state: AnnouncementSigsState,
	secp_ctx: Secp256k1<secp256k1::All>,
	channel_value_satoshis: u64,

	latest_monitor_update_id: u64,

	holder_signer: ChannelSignerType<SP>,
	shutdown_scriptpubkey: Option<ShutdownScript>,
	destination_script: ScriptBuf,

	// Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
	// generation start at 0 and count up...this simplifies some parts of implementation at the
	// cost of others, but should really just be changed.

	cur_holder_commitment_transaction_number: u64,
	cur_counterparty_commitment_transaction_number: u64,
	value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
	pending_inbound_htlcs: Vec<InboundHTLCOutput>,
	pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
	holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
1262 /// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
1263 /// need to ensure we resend them in the order we originally generated them. Note that because
1264 /// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
1265 /// sufficient to simply set this to the opposite of any message we are generating as we
1266 /// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
1267 /// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
1269 resend_order: RAACommitmentOrder,
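// Illustrative example: if the last update we generated was a commitment_signed, this is set so
// that, on reconnect or monitor restoration, any pending revoke_and_ack is retransmitted before
// that commitment_signed; generating an RAA flips the order the other way around.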
1271 monitor_pending_channel_ready: bool,
1272 monitor_pending_revoke_and_ack: bool,
1273 monitor_pending_commitment_signed: bool,
1275 // TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
1276 // responsible for some of the HTLCs here or not - we don't know whether the update in question
1277 // completed or not. We currently ignore these fields entirely when force-closing a channel,
1278 // but need to handle this somehow or we run the risk of losing HTLCs!
1279 monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
1280 monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
1281 monitor_pending_finalized_fulfills: Vec<HTLCSource>,
1283 /// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
1284 /// but our signer (initially) refused to give us a signature, we should retry at some point in
1285 /// the future when the signer indicates it may have a signature for us.
1287 /// This flag is set in such a case. Note that we don't need to persist this as we'll end up
1288 /// setting it again as a side-effect of [`Channel::channel_reestablish`].
1289 signer_pending_commitment_update: bool,
1290 /// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
1291 /// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
1292 /// outbound or inbound.
1293 signer_pending_funding: bool,
1295 // pending_update_fee is filled when sending and receiving update_fee.
1297 // Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
1298 // or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
1299 // generating new commitment transactions with exactly the same criteria as inbound/outbound
1300 // HTLCs with similar state.
1301 pending_update_fee: Option<(u32, FeeUpdateState)>,
1302 // If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
1303 // it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
1304 // `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
1305 // `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
1306 // further `send_update_fee` calls, dropping the previous holding cell update entirely.
1307 holding_cell_update_fee: Option<u32>,
1308 next_holder_htlc_id: u64,
1309 next_counterparty_htlc_id: u64,
1310 feerate_per_kw: u32,
1312 /// The timestamp set on our latest `channel_update` message for this channel. It is updated
1313 /// when the channel is updated in ways which may impact the `channel_update` message or when a
1314 /// new block is received, ensuring it's always at least moderately close to the current real
1316 update_time_counter: u32,
1318 #[cfg(debug_assertions)]
1319 /// Max to_local and to_remote outputs in a locally-generated commitment transaction
1320 holder_max_commitment_tx_output: Mutex<(u64, u64)>,
1321 #[cfg(debug_assertions)]
1322 /// Max to_local and to_remote outputs in a remote-generated commitment transaction
1323 counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,
1325 last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
1326 target_closing_feerate_sats_per_kw: Option<u32>,
1328 /// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
1329 /// update, we need to delay processing it until later. We do that here by simply storing the
1330 /// closing_signed message and handling it in `maybe_propose_closing_signed`.
1331 pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,
1333 /// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
1334 /// transaction. These are set once we reach `closing_negotiation_ready`.
1335 #[cfg(test)]
1336 pub(crate) closing_fee_limits: Option<(u64, u64)>,
1337 #[cfg(not(test))]
1338 closing_fee_limits: Option<(u64, u64)>,
1340 /// If we remove an HTLC (or fee update), commit, and receive our counterparty's
1341 /// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
1342 /// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
1343 /// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
1344 /// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
1346 /// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
1347 /// until we see a `commitment_signed` before doing so.
1349 /// We don't bother to persist this - we anticipate this state won't last longer than a few
1350 /// milliseconds, so any accidental force-closes here should be exceedingly rare.
1351 expecting_peer_commitment_signed: bool,
1353 /// The hash of the block in which the funding transaction was included.
1354 funding_tx_confirmed_in: Option<BlockHash>,
1355 funding_tx_confirmation_height: u32,
1356 short_channel_id: Option<u64>,
1357 /// Either the height at which this channel was created or the height at which it was last
1358 /// serialized if it was serialized by versions prior to 0.0.103.
1359 /// We use this to close if funding is never broadcasted.
1360 channel_creation_height: u32,
1362 counterparty_dust_limit_satoshis: u64,
1364 #[cfg(test)]
1365 pub(super) holder_dust_limit_satoshis: u64,
1366 #[cfg(not(test))]
1367 holder_dust_limit_satoshis: u64,
1369 #[cfg(test)]
1370 pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
1371 #[cfg(not(test))]
1372 counterparty_max_htlc_value_in_flight_msat: u64,
1374 #[cfg(test)]
1375 pub(super) holder_max_htlc_value_in_flight_msat: u64,
1376 #[cfg(not(test))]
1377 holder_max_htlc_value_in_flight_msat: u64,
1379 /// minimum channel reserve for self to maintain - set by them.
1380 counterparty_selected_channel_reserve_satoshis: Option<u64>,
1382 #[cfg(test)]
1383 pub(super) holder_selected_channel_reserve_satoshis: u64,
1384 #[cfg(not(test))]
1385 holder_selected_channel_reserve_satoshis: u64,
1387 counterparty_htlc_minimum_msat: u64,
1388 holder_htlc_minimum_msat: u64,
1389 #[cfg(test)]
1390 pub counterparty_max_accepted_htlcs: u16,
1391 #[cfg(not(test))]
1392 counterparty_max_accepted_htlcs: u16,
1393 holder_max_accepted_htlcs: u16,
1394 minimum_depth: Option<u32>,
1396 counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
1398 pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
1399 funding_transaction: Option<Transaction>,
1400 is_batch_funding: Option<()>,
1402 counterparty_cur_commitment_point: Option<PublicKey>,
1403 counterparty_prev_commitment_point: Option<PublicKey>,
1404 counterparty_node_id: PublicKey,
1406 counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,
1408 commitment_secrets: CounterpartyCommitmentSecrets,
1410 channel_update_status: ChannelUpdateStatus,
1411 /// Once we reach `closing_negotiation_ready`, we set this, indicating that if closing_signed does
1412 /// not complete within a single timer tick (one minute), we should force-close the channel.
1413 /// This prevents us from keeping unusable channels around forever if our counterparty wishes
1415 /// Note that this field is reset to false on deserialization to give us a chance to connect to
1416 /// our peer and start the closing_signed negotiation fresh.
1417 closing_signed_in_flight: bool,
1419 /// Our counterparty's channel_announcement signatures provided in announcement_signatures.
1420 /// This can be used to rebroadcast the channel_announcement message later.
1421 announcement_sigs: Option<(Signature, Signature)>,
1423 // We save these values so we can make sure `next_local_commit_tx_fee_msat` and
1424 // `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
1425 // be, by comparing the cached values to the fee of the transaction generated by
1426 // `build_commitment_transaction`.
1427 #[cfg(any(test, fuzzing))]
1428 next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
1429 #[cfg(any(test, fuzzing))]
1430 next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
1432 /// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
1433 /// they will not send a channel_reestablish until the channel locks in. Then, they will send a
1434 /// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
1435 /// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
1436 /// message until we receive a channel_reestablish.
1438 /// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
1439 pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,
1441 /// An option set when we wish to track how many ticks have elapsed while waiting for a response
1442 /// from our counterparty after sending a message. If the peer has yet to respond after reaching
1443 /// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
1444 /// unblock the state machine.
1446 /// This behavior is mostly motivated by a lnd bug in which we don't receive a message we expect
1447 /// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
1448 /// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
1450 /// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
1451 /// [`msgs::RevokeAndACK`] message from the counterparty.
1452 sent_message_awaiting_response: Option<usize>,
1454 #[cfg(any(test, fuzzing))]
1455 // When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
1456 // corresponding HTLC on the inbound path. If, then, the outbound path channel is
1458 // disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
1458 // messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
1459 // is fine, but as a sanity check in our failure to generate the second claim, we check here
1460 // that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
1461 historical_inbound_htlc_fulfills: HashSet<u64>,
1463 /// This channel's type, as negotiated during channel open
1464 channel_type: ChannelTypeFeatures,
1466 // Our counterparty can offer us SCID aliases which they will map to this channel when routing
1467 // outbound payments. These can be used in invoice route hints to avoid explicitly revealing
1468 // the channel's funding UTXO.
1470 // We also use this when sending our peer a channel_update that isn't to be broadcasted
1471 // publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
1472 // associated channel mapping.
1474 // We only bother storing the most recent SCID alias at any time, though our counterparty has
1475 // to store all of them.
1476 latest_inbound_scid_alias: Option<u64>,
1478 // We always offer our counterparty a static SCID alias, which we recognize as for this channel
1479 // if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
1480 // don't currently support node id aliases and eventually privacy should be provided with
1481 // blinded paths instead of simple scid+node_id aliases.
1482 outbound_scid_alias: u64,
1484 // We track whether we already emitted a `ChannelPending` event.
1485 channel_pending_event_emitted: bool,
1487 // We track whether we already emitted a `ChannelReady` event.
1488 channel_ready_event_emitted: bool,
1490 /// `Some` if we initiated shutting down the channel.
1491 local_initiated_shutdown: Option<()>,
1493 /// The unique identifier used to re-derive the private key material for the channel through
1494 /// [`SignerProvider::derive_channel_signer`].
1495 #[cfg(not(test))]
1496 channel_keys_id: [u8; 32],
1497 #[cfg(test)]
1498 pub channel_keys_id: [u8; 32],
1500 /// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
1501 /// store it here and only release it to the `ChannelManager` once it asks for it.
1502 blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
1505 impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
1506 fn new_for_inbound_channel<'a, ES: Deref, F: Deref, L: Deref>(
1507 fee_estimator: &'a LowerBoundedFeeEstimator<F>,
1508 entropy_source: &'a ES,
1509 signer_provider: &'a SP,
1510 counterparty_node_id: PublicKey,
1511 their_features: &'a InitFeatures,
1513 config: &'a UserConfig,
1514 current_chain_height: u32,
1517 our_funding_satoshis: u64,
1518 counterparty_pubkeys: ChannelPublicKeys,
1519 channel_type: ChannelTypeFeatures,
1520 holder_selected_channel_reserve_satoshis: u64,
1521 msg_channel_reserve_satoshis: u64,
1523 open_channel_fields: msgs::CommonOpenChannelFields,
1524 ) -> Result<ChannelContext<SP>, ChannelError>
1526 ES::Target: EntropySource,
1527 F::Target: FeeEstimator,
1529 SP::Target: SignerProvider,
1531 let logger = WithContext::from(logger, Some(counterparty_node_id), Some(open_channel_fields.temporary_channel_id));
1532 let announced_channel = (open_channel_fields.channel_flags & 1) == 1;
1534 let channel_value_satoshis = our_funding_satoshis.saturating_add(open_channel_fields.funding_satoshis);
1536 let channel_keys_id = signer_provider.generate_channel_keys_id(true, channel_value_satoshis, user_id);
1537 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
1538 let pubkeys = holder_signer.pubkeys().clone();
1540 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
1541 return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
1544 // Check sanity of message fields:
1545 if channel_value_satoshis > config.channel_handshake_limits.max_funding_satoshis {
1546 return Err(ChannelError::Close(format!(
1547 "Per our config, funding must be at most {}. It was {}. Peer contribution: {}. Our contribution: {}",
1548 config.channel_handshake_limits.max_funding_satoshis, channel_value_satoshis,
1549 open_channel_fields.funding_satoshis, our_funding_satoshis)));
1551 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
1552 return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", channel_value_satoshis)));
1554 if msg_channel_reserve_satoshis > channel_value_satoshis {
1555 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be no greater than channel_value_satoshis: {}", msg_channel_reserve_satoshis, channel_value_satoshis)));
1557 let full_channel_value_msat = (channel_value_satoshis - msg_channel_reserve_satoshis) * 1000;
1558 if msg_push_msat > full_channel_value_msat {
1559 return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg_push_msat, full_channel_value_msat)));
1561 if open_channel_fields.dust_limit_satoshis > channel_value_satoshis {
1562 return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than channel_value_satoshis {}. Peer never wants payout outputs?", open_channel_fields.dust_limit_satoshis, channel_value_satoshis)));
1564 if open_channel_fields.htlc_minimum_msat >= full_channel_value_msat {
1565 return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", open_channel_fields.htlc_minimum_msat, full_channel_value_msat)));
1567 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, open_channel_fields.commitment_feerate_sat_per_1000_weight, None, &&logger)?;
1569 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
1570 if open_channel_fields.to_self_delay > max_counterparty_selected_contest_delay {
1571 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, open_channel_fields.to_self_delay)));
1573 if open_channel_fields.max_accepted_htlcs < 1 {
1574 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
1576 if open_channel_fields.max_accepted_htlcs > MAX_HTLCS {
1577 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", open_channel_fields.max_accepted_htlcs, MAX_HTLCS)));
1580 // Now check against optional parameters as set by config...
1581 if channel_value_satoshis < config.channel_handshake_limits.min_funding_satoshis {
1582 return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", channel_value_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
1584 if open_channel_fields.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
1585 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", open_channel_fields.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
1587 if open_channel_fields.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
1588 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", open_channel_fields.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
1590 if msg_channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
1591 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg_channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
1593 if open_channel_fields.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
1594 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", open_channel_fields.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
1596 if open_channel_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
1597 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
1599 if open_channel_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
1600 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
1603 // Convert things into internal flags and prep our state:
1605 if config.channel_handshake_limits.force_announced_channel_preference {
1606 if config.channel_handshake_config.announced_channel != announced_channel {
1607 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
1611 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
1612 // Protocol level safety check in place, although it should never happen because
1613 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
1614 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
1616 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
1617 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg_push_msat)));
1619 if msg_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
1620 log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
1621 msg_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
1623 if holder_selected_channel_reserve_satoshis < open_channel_fields.dust_limit_satoshis {
1624 return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", open_channel_fields.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
1627 // check if the funder's amount for the initial commitment tx is sufficient
1628 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
1629 let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
1630 ANCHOR_OUTPUT_VALUE_SATOSHI * 2
1634 let funders_amount_msat = open_channel_fields.funding_satoshis * 1000 - msg_push_msat;
1635 let commitment_tx_fee = commit_tx_fee_msat(open_channel_fields.commitment_feerate_sat_per_1000_weight, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
1636 if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
1637 return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay the initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
1640 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
1641 // While it's reasonable for us to not meet the channel reserve initially (if they don't
1642 // want to push much to us), our counterparty should always have more than our reserve.
1643 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
1644 return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
1647 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
1648 match &open_channel_fields.shutdown_scriptpubkey {
1649 &Some(ref script) => {
1650 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
1651 if script.len() == 0 {
1654 if !script::is_bolt2_compliant(&script, their_features) {
1655 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
1657 Some(script.clone())
1660 // Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (a.k.a. a 0-length script). Peer looks buggy, we fail the channel
1662 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
1667 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
1668 match signer_provider.get_shutdown_scriptpubkey() {
1669 Ok(scriptpubkey) => Some(scriptpubkey),
1670 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
1674 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
1675 if !shutdown_scriptpubkey.is_compatible(&their_features) {
1676 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
1680 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
1681 Ok(script) => script,
1682 Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
1685 let mut secp_ctx = Secp256k1::new();
1686 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
1688 let minimum_depth = if is_0conf {
1691 Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
1694 let value_to_self_msat = our_funding_satoshis * 1000 + msg_push_msat;
1696 // TODO(dual_funding): Checks for `funding_feerate_sat_per_1000_weight`?
1698 let channel_context = ChannelContext {
1701 config: LegacyChannelConfig {
1702 options: config.channel_config.clone(),
1704 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
1709 inbound_handshake_limits_override: None,
1711 temporary_channel_id: Some(open_channel_fields.temporary_channel_id),
1712 channel_id: open_channel_fields.temporary_channel_id,
1713 channel_state: ChannelState::NegotiatingFunding(
1714 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
1716 announcement_sigs_state: AnnouncementSigsState::NotSent,
1719 latest_monitor_update_id: 0,
1721 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
1722 shutdown_scriptpubkey,
1725 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
1726 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
1729 pending_inbound_htlcs: Vec::new(),
1730 pending_outbound_htlcs: Vec::new(),
1731 holding_cell_htlc_updates: Vec::new(),
1732 pending_update_fee: None,
1733 holding_cell_update_fee: None,
1734 next_holder_htlc_id: 0,
1735 next_counterparty_htlc_id: 0,
1736 update_time_counter: 1,
1738 resend_order: RAACommitmentOrder::CommitmentFirst,
1740 monitor_pending_channel_ready: false,
1741 monitor_pending_revoke_and_ack: false,
1742 monitor_pending_commitment_signed: false,
1743 monitor_pending_forwards: Vec::new(),
1744 monitor_pending_failures: Vec::new(),
1745 monitor_pending_finalized_fulfills: Vec::new(),
1747 signer_pending_commitment_update: false,
1748 signer_pending_funding: false,
1751 #[cfg(debug_assertions)]
1752 holder_max_commitment_tx_output: Mutex::new((value_to_self_msat, (channel_value_satoshis * 1000 - msg_push_msat).saturating_sub(value_to_self_msat))),
1753 #[cfg(debug_assertions)]
1754 counterparty_max_commitment_tx_output: Mutex::new((value_to_self_msat, (channel_value_satoshis * 1000 - msg_push_msat).saturating_sub(value_to_self_msat))),
1756 last_sent_closing_fee: None,
1757 pending_counterparty_closing_signed: None,
1758 expecting_peer_commitment_signed: false,
1759 closing_fee_limits: None,
1760 target_closing_feerate_sats_per_kw: None,
1762 funding_tx_confirmed_in: None,
1763 funding_tx_confirmation_height: 0,
1764 short_channel_id: None,
1765 channel_creation_height: current_chain_height,
1767 feerate_per_kw: open_channel_fields.commitment_feerate_sat_per_1000_weight,
1768 channel_value_satoshis,
1769 counterparty_dust_limit_satoshis: open_channel_fields.dust_limit_satoshis,
1770 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
1771 counterparty_max_htlc_value_in_flight_msat: cmp::min(open_channel_fields.max_htlc_value_in_flight_msat, channel_value_satoshis * 1000),
1772 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
1773 counterparty_selected_channel_reserve_satoshis: Some(msg_channel_reserve_satoshis),
1774 holder_selected_channel_reserve_satoshis,
1775 counterparty_htlc_minimum_msat: open_channel_fields.htlc_minimum_msat,
1776 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
1777 counterparty_max_accepted_htlcs: open_channel_fields.max_accepted_htlcs,
1778 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
1781 counterparty_forwarding_info: None,
1783 channel_transaction_parameters: ChannelTransactionParameters {
1784 holder_pubkeys: pubkeys,
1785 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
1786 is_outbound_from_holder: false,
1787 counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
1788 selected_contest_delay: open_channel_fields.to_self_delay,
1789 pubkeys: counterparty_pubkeys,
1791 funding_outpoint: None,
1792 channel_type_features: channel_type.clone()
1794 funding_transaction: None,
1795 is_batch_funding: None,
1797 counterparty_cur_commitment_point: Some(open_channel_fields.first_per_commitment_point),
1798 counterparty_prev_commitment_point: None,
1799 counterparty_node_id,
1801 counterparty_shutdown_scriptpubkey,
1803 commitment_secrets: CounterpartyCommitmentSecrets::new(),
1805 channel_update_status: ChannelUpdateStatus::Enabled,
1806 closing_signed_in_flight: false,
1808 announcement_sigs: None,
1810 #[cfg(any(test, fuzzing))]
1811 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
1812 #[cfg(any(test, fuzzing))]
1813 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
1815 workaround_lnd_bug_4006: None,
1816 sent_message_awaiting_response: None,
1818 latest_inbound_scid_alias: None,
1819 outbound_scid_alias: 0,
1821 channel_pending_event_emitted: false,
1822 channel_ready_event_emitted: false,
1824 #[cfg(any(test, fuzzing))]
1825 historical_inbound_htlc_fulfills: new_hash_set(),
1830 local_initiated_shutdown: None,
1832 blocked_monitor_updates: Vec::new(),
1838 fn new_for_outbound_channel<'a, ES: Deref, F: Deref>(
1839 fee_estimator: &'a LowerBoundedFeeEstimator<F>,
1840 entropy_source: &'a ES,
1841 signer_provider: &'a SP,
1842 counterparty_node_id: PublicKey,
1843 their_features: &'a InitFeatures,
1844 funding_satoshis: u64,
1847 config: &'a UserConfig,
1848 current_chain_height: u32,
1849 outbound_scid_alias: u64,
1850 temporary_channel_id: Option<ChannelId>,
1851 channel_type: ChannelTypeFeatures,
1852 ) -> Result<ChannelContext<SP>, APIError>
1854 ES::Target: EntropySource,
1855 F::Target: FeeEstimator,
1856 SP::Target: SignerProvider,
1858 // This will be updated with the counterparty contribution if this is a dual-funded channel
1859 let channel_value_satoshis = funding_satoshis;
1861 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
1862 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
1863 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
1864 let pubkeys = holder_signer.pubkeys().clone();
1866 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
1867 return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
1869 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
1870 return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
1872 let channel_value_msat = channel_value_satoshis * 1000;
1873 if push_msat > channel_value_msat {
1874 return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
1876 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
1877 return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk", holder_selected_contest_delay)});
1879 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
1880 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
1881 // Protocol level safety check in place, although it should never happen because
1882 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
1883 return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
1886 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
1888 let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
1889 (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
1891 (ConfirmationTarget::NonAnchorChannelFee, 0)
1893 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
1895 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
1896 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
1897 if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
1898 return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay the initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
1901 let mut secp_ctx = Secp256k1::new();
1902 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
1904 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
1905 match signer_provider.get_shutdown_scriptpubkey() {
1906 Ok(scriptpubkey) => Some(scriptpubkey),
1907 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
1911 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
1912 if !shutdown_scriptpubkey.is_compatible(&their_features) {
1913 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
1917 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
1918 Ok(script) => script,
1919 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
1922 let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
1927 config: LegacyChannelConfig {
1928 options: config.channel_config.clone(),
1929 announced_channel: config.channel_handshake_config.announced_channel,
1930 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
1935 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
1937 channel_id: temporary_channel_id,
1938 temporary_channel_id: Some(temporary_channel_id),
1939 channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
1940 announcement_sigs_state: AnnouncementSigsState::NotSent,
1942 channel_value_satoshis,
1944 latest_monitor_update_id: 0,
1946 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
1947 shutdown_scriptpubkey,
1950 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
1951 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
1954 pending_inbound_htlcs: Vec::new(),
1955 pending_outbound_htlcs: Vec::new(),
1956 holding_cell_htlc_updates: Vec::new(),
1957 pending_update_fee: None,
1958 holding_cell_update_fee: None,
1959 next_holder_htlc_id: 0,
1960 next_counterparty_htlc_id: 0,
1961 update_time_counter: 1,
1963 resend_order: RAACommitmentOrder::CommitmentFirst,
1965 monitor_pending_channel_ready: false,
1966 monitor_pending_revoke_and_ack: false,
1967 monitor_pending_commitment_signed: false,
1968 monitor_pending_forwards: Vec::new(),
1969 monitor_pending_failures: Vec::new(),
1970 monitor_pending_finalized_fulfills: Vec::new(),
1972 signer_pending_commitment_update: false,
1973 signer_pending_funding: false,
1975 #[cfg(debug_assertions)]
1976 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
1977 #[cfg(debug_assertions)]
1978 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
1980 last_sent_closing_fee: None,
1981 pending_counterparty_closing_signed: None,
1982 expecting_peer_commitment_signed: false,
1983 closing_fee_limits: None,
1984 target_closing_feerate_sats_per_kw: None,
1986 funding_tx_confirmed_in: None,
1987 funding_tx_confirmation_height: 0,
1988 short_channel_id: None,
1989 channel_creation_height: current_chain_height,
1991 feerate_per_kw: commitment_feerate,
1992 counterparty_dust_limit_satoshis: 0,
1993 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
1994 counterparty_max_htlc_value_in_flight_msat: 0,
1995 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
1996 counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
1997 holder_selected_channel_reserve_satoshis,
1998 counterparty_htlc_minimum_msat: 0,
1999 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
2000 counterparty_max_accepted_htlcs: 0,
2001 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
2002 minimum_depth: None, // Filled in in accept_channel
2004 counterparty_forwarding_info: None,
2006 channel_transaction_parameters: ChannelTransactionParameters {
2007 holder_pubkeys: pubkeys,
2008 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
2009 is_outbound_from_holder: true,
2010 counterparty_parameters: None,
2011 funding_outpoint: None,
2012 channel_type_features: channel_type.clone()
2014 funding_transaction: None,
2015 is_batch_funding: None,
2017 counterparty_cur_commitment_point: None,
2018 counterparty_prev_commitment_point: None,
2019 counterparty_node_id,
2021 counterparty_shutdown_scriptpubkey: None,
2023 commitment_secrets: CounterpartyCommitmentSecrets::new(),
2025 channel_update_status: ChannelUpdateStatus::Enabled,
2026 closing_signed_in_flight: false,
2028 announcement_sigs: None,
2030 #[cfg(any(test, fuzzing))]
2031 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
2032 #[cfg(any(test, fuzzing))]
2033 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
2035 workaround_lnd_bug_4006: None,
2036 sent_message_awaiting_response: None,
2038 latest_inbound_scid_alias: None,
2039 outbound_scid_alias,
2041 channel_pending_event_emitted: false,
2042 channel_ready_event_emitted: false,
2044 #[cfg(any(test, fuzzing))]
2045 historical_inbound_htlc_fulfills: new_hash_set(),
2050 blocked_monitor_updates: Vec::new(),
2051 local_initiated_shutdown: None,
2055 /// Allowed in any state (including after shutdown)
2056 pub fn get_update_time_counter(&self) -> u32 {
2057 self.update_time_counter
2060 pub fn get_latest_monitor_update_id(&self) -> u64 {
2061 self.latest_monitor_update_id
2064 pub fn should_announce(&self) -> bool {
2065 self.config.announced_channel
2068 pub fn is_outbound(&self) -> bool {
2069 self.channel_transaction_parameters.is_outbound_from_holder
2072 /// Gets the fee we'd want to charge for adding an HTLC output to this Channel
2073 /// Allowed in any state (including after shutdown)
2074 pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
2075 self.config.options.forwarding_fee_base_msat
2078 /// Returns true if we've ever received a message from the remote end for this Channel
2079 pub fn have_received_message(&self) -> bool {
2080 self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
2083 /// Returns true if this channel is fully established and not known to be closing.
2084 /// Allowed in any state (including after shutdown)
2085 pub fn is_usable(&self) -> bool {
2086 matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
2087 !self.channel_state.is_local_shutdown_sent() &&
2088 !self.channel_state.is_remote_shutdown_sent() &&
2089 !self.monitor_pending_channel_ready
2092 /// shutdown state returns the state of the channel in its various stages of shutdown
2093 pub fn shutdown_state(&self) -> ChannelShutdownState {
2094 match self.channel_state {
2095 ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
2096 if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
2097 ChannelShutdownState::ShutdownInitiated
2098 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
2099 ChannelShutdownState::ResolvingHTLCs
2100 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
2101 ChannelShutdownState::NegotiatingClosingFee
2103 ChannelShutdownState::NotShuttingDown
2105 ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
2106 _ => ChannelShutdownState::NotShuttingDown,
2110 fn closing_negotiation_ready(&self) -> bool {
2111 let is_ready_to_close = match self.channel_state {
2112 ChannelState::AwaitingChannelReady(flags) =>
2113 flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
2114 ChannelState::ChannelReady(flags) =>
2115 flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
2118 self.pending_inbound_htlcs.is_empty() &&
2119 self.pending_outbound_htlcs.is_empty() &&
2120 self.pending_update_fee.is_none() &&
2124 /// Returns true if this channel is currently available for use. This is a superset of
2125 /// is_usable() and considers things like the channel being temporarily disabled.
2126 /// Allowed in any state (including after shutdown)
2127 pub fn is_live(&self) -> bool {
2128 self.is_usable() && !self.channel_state.is_peer_disconnected()
2131 // Public utilities:
2133 pub fn channel_id(&self) -> ChannelId {
2137 // Return the `temporary_channel_id` used during channel establishment.
2139 // Will return `None` for channels created prior to LDK version 0.0.115.
2140 pub fn temporary_channel_id(&self) -> Option<ChannelId> {
2141 self.temporary_channel_id
2144 pub fn minimum_depth(&self) -> Option<u32> {
2148 /// Gets the "user_id" value passed into the construction of this channel. It has no special
2149 /// meaning and exists only to allow users to have a persistent identifier of a channel.
2150 pub fn get_user_id(&self) -> u128 {
2154 /// Gets the channel's type
2155 pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
2159 /// Gets the channel's `short_channel_id`.
2161 /// Will return `None` if the channel hasn't been confirmed yet.
2162 pub fn get_short_channel_id(&self) -> Option<u64> {
2163 self.short_channel_id
2166 /// Allowed in any state (including after shutdown)
2167 pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
2168 self.latest_inbound_scid_alias
2171 /// Allowed in any state (including after shutdown)
2172 pub fn outbound_scid_alias(&self) -> u64 {
2173 self.outbound_scid_alias
2176 /// Returns the holder signer for this channel.
2178 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
2179 &self.holder_signer
2182 /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
2183 /// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases
2184 /// or prior to any channel actions during `Channel` initialization.
2185 pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
2186 debug_assert_eq!(self.outbound_scid_alias, 0);
2187 self.outbound_scid_alias = outbound_scid_alias;
2190 /// Returns the funding_txo we either got from our peer, or were given by
2191 /// get_funding_created.
2192 pub fn get_funding_txo(&self) -> Option<OutPoint> {
2193 self.channel_transaction_parameters.funding_outpoint
2196 /// Returns the height in which our funding transaction was confirmed.
2197 pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
2198 let conf_height = self.funding_tx_confirmation_height;
2199 if conf_height > 0 {
2206 /// Returns the block hash in which our funding transaction was confirmed.
2207 pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
2208 self.funding_tx_confirmed_in
2211 /// Returns the current number of confirmations on the funding transaction.
2212 pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
2213 if self.funding_tx_confirmation_height == 0 {
2214 // We either haven't seen any confirmation yet, or observed a reorg.
2218 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
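// Example: with funding_tx_confirmation_height = 800_000 and height = 800_005 this returns 6,
// since the confirming block itself counts as the first confirmation; 0 means we have not yet
// seen a confirmation (or we observed a reorg).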
2221 fn get_holder_selected_contest_delay(&self) -> u16 {
2222 self.channel_transaction_parameters.holder_selected_contest_delay
2225 fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
2226 &self.channel_transaction_parameters.holder_pubkeys
2229 pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
2230 self.channel_transaction_parameters.counterparty_parameters
2231 .as_ref().map(|params| params.selected_contest_delay)
2234 fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
2235 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
2238 /// Allowed in any state (including after shutdown)
2239 pub fn get_counterparty_node_id(&self) -> PublicKey {
2240 self.counterparty_node_id
2243 /// Allowed in any state (including after shutdown)
2244 pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
2245 self.holder_htlc_minimum_msat
2248 /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
2249 pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
2250 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
2253 /// Allowed in any state (including after shutdown)
2254 pub fn get_announced_htlc_max_msat(&self) -> u64 {
2256 // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
2257 // to use full capacity. This is an effort to reduce routing failures, because in many cases
2258 // a channel might be used to route very small values (either by honest users or as a DoS vector).
2259 self.channel_value_satoshis * 1000 * 9 / 10,
2261 self.counterparty_max_htlc_value_in_flight_msat
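// Worked example (illustrative figures): for a 1_000_000 sat channel the capacity-based cap is
// 1_000_000_000 * 9 / 10 = 900_000_000 msat, and the announced htlc_maximum_msat is the lesser
// of that and the counterparty's max_htlc_value_in_flight_msat.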
2265 /// Allowed in any state (including after shutdown)
2266 pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
2267 self.counterparty_htlc_minimum_msat
2270 /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
2271 pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
2272 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
2275 fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
2276 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
2277 let holder_reserve = self.holder_selected_channel_reserve_satoshis;
2279 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
2280 party_max_htlc_value_in_flight_msat
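// Worked example (illustrative figures): with a 1_000_000 sat channel and 10_000 sat reserves
// on each side, the spendable bound is (1_000_000 - 10_000 - 10_000) * 1_000 = 980_000_000 msat,
// further capped by the given party's max_htlc_value_in_flight_msat.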
2285 pub fn get_value_satoshis(&self) -> u64 {
2286 self.channel_value_satoshis
2289 pub fn get_fee_proportional_millionths(&self) -> u32 {
2290 self.config.options.forwarding_fee_proportional_millionths
2293 pub fn get_cltv_expiry_delta(&self) -> u16 {
2294 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
2297 pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
2298 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
2299 where F::Target: FeeEstimator
2301 match self.config.options.max_dust_htlc_exposure {
2302 MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
2303 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
2304 ConfirmationTarget::OnChainSweep) as u64;
2305 feerate_per_kw.saturating_mul(multiplier)
2307 MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
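// Worked example (illustrative figures): with MaxDustHTLCExposure::FeeRateMultiplier(10_000)
// and an OnChainSweep estimate of 5_000 sat/kW, the allowed dust exposure is
// 5_000 * 10_000 = 50_000_000 msat; FixedLimitMsat simply returns the configured limit.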
2311 /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
2312 pub fn prev_config(&self) -> Option<ChannelConfig> {
2313 self.prev_config.map(|prev_config| prev_config.0)
2316 // Checks whether we should emit a `ChannelPending` event.
2317 pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
2318 self.is_funding_broadcast() && !self.channel_pending_event_emitted
2321 // Returns whether we already emitted a `ChannelPending` event.
2322 pub(crate) fn channel_pending_event_emitted(&self) -> bool {
2323 self.channel_pending_event_emitted
2326 // Remembers that we already emitted a `ChannelPending` event.
2327 pub(crate) fn set_channel_pending_event_emitted(&mut self) {
2328 self.channel_pending_event_emitted = true;
2331 // Checks whether we should emit a `ChannelReady` event.
2332 pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
2333 self.is_usable() && !self.channel_ready_event_emitted
2336 // Remembers that we already emitted a `ChannelReady` event.
2337 pub(crate) fn set_channel_ready_event_emitted(&mut self) {
2338 self.channel_ready_event_emitted = true;
2341 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
2342 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
2343 /// no longer be considered when forwarding HTLCs.
2344 pub fn maybe_expire_prev_config(&mut self) {
2345 if self.prev_config.is_none() {
2348 let prev_config = self.prev_config.as_mut().unwrap();
2350 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
2351 self.prev_config = None;
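// Illustrative timing note (assumes the roughly once-per-minute ChannelManager timer): a
// superseded config therefore keeps being honored for forwarding for about
// EXPIRE_PREV_CONFIG_TICKS minutes before being dropped here.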
2355 /// Returns the current [`ChannelConfig`] applied to the channel.
2356 pub fn config(&self) -> ChannelConfig {
2360 /// Updates the channel's config. A bool is returned indicating whether the config update
2361 /// applied resulted in a new ChannelUpdate message.
2362 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
2363 let did_channel_update =
2364 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
2365 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
2366 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
2367 if did_channel_update {
2368 self.prev_config = Some((self.config.options, 0));
2369 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
2370 // policy change to propagate throughout the network.
2371 self.update_time_counter += 1;
2373 self.config.options = *config;
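// Illustrative usage sketch (the surrounding caller code is an assumption):
//   let mut cfg = context.config();
//   cfg.forwarding_fee_base_msat += 100;
//   if context.update_config(&cfg) {
//       // the relay policy changed; a fresh channel_update should be generated
//   }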
2377 /// Returns true if funding_signed was sent/received and the
2378 /// funding transaction has been broadcast if necessary.
2379 pub fn is_funding_broadcast(&self) -> bool {
2380 !self.channel_state.is_pre_funded_state() &&
2381 !matches!(self.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH))
2384 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
2385 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
2386 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
2387 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
2388 /// such, a transaction is generally the result of b increasing the amount paid to a (or adding
2390 /// @local is used only to convert relevant internal structures which refer to remote vs local
2391 /// to decide value of outputs and direction of HTLCs.
2392 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
2393 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
2394 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
2395 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
2396 /// which peer generated this transaction and "to whom" this transaction flows.
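// Illustrative reading of the two flags (a restatement, not new behavior): `local` selects whose
// commitment transaction is built (ours when true, the counterparty's when false), while
// `generated_by_local` selects which uncommitted HTLCs and fee updates to include. For example,
// when sending a commitment_signed we build the counterparty's transaction with
// local == false, generated_by_local == true.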
2398 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
2399 where L::Target: Logger
2401 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
2402 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
2403 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
2405 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
2406 let mut remote_htlc_total_msat = 0;
2407 let mut local_htlc_total_msat = 0;
2408 let mut value_to_self_msat_offset = 0;
2410 let mut feerate_per_kw = self.feerate_per_kw;
2411 if let Some((feerate, update_state)) = self.pending_update_fee {
2412 if match update_state {
2413 // Note that these match the inclusion criteria when scanning
2414 // pending_inbound_htlcs below.
2415 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
2416 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
2417 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
2419 feerate_per_kw = feerate;
2423 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
2424 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
2425 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
2427 if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
2429 macro_rules! get_htlc_in_commitment {
2430 ($htlc: expr, $offered: expr) => {
2431 HTLCOutputInCommitment {
2433 amount_msat: $htlc.amount_msat,
2434 cltv_expiry: $htlc.cltv_expiry,
2435 payment_hash: $htlc.payment_hash,
2436 transaction_output_index: None
2441 macro_rules! add_htlc_output {
2442 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
2443 if $outbound == local { // "offered HTLC output"
2444 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
2445 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2448 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
2450 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
2451 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
2452 included_non_dust_htlcs.push((htlc_in_tx, $source));
2454 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
2455 included_dust_htlcs.push((htlc_in_tx, $source));
2458 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
2459 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2462 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
2464 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
2465 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
2466 included_non_dust_htlcs.push((htlc_in_tx, $source));
2468 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
2469 included_dust_htlcs.push((htlc_in_tx, $source));
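// Illustrative dust math (the 663-weight timeout transaction and 354 sat dust limit are assumed
// typical values, not defined on these lines): on a non-anchor channel at 2_500 sat/kW, an
// offered HTLC is trimmed to dust unless its value is at least
// 354 + 2_500 * 663 / 1_000 = 354 + 1_657 = 2_011 sats (received HTLCs use the heavier
// success-transaction weight instead); with anchors the second-stage fee term is zero, so the
// threshold is just the broadcaster's dust limit.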
2475 let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
2477 for ref htlc in self.pending_inbound_htlcs.iter() {
2478 let (include, state_name) = match htlc.state {
2479 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
2480 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
2481 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
2482 InboundHTLCState::Committed => (true, "Committed"),
2483 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
2487 add_htlc_output!(htlc, false, None, state_name);
2488 remote_htlc_total_msat += htlc.amount_msat;
2490 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
2492 &InboundHTLCState::LocalRemoved(ref reason) => {
2493 if generated_by_local {
2494 if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
2495 inbound_htlc_preimages.push(preimage);
2496 value_to_self_msat_offset += htlc.amount_msat as i64;
2506 let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
2508 for ref htlc in self.pending_outbound_htlcs.iter() {
2509 let (include, state_name) = match htlc.state {
2510 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
2511 OutboundHTLCState::Committed => (true, "Committed"),
2512 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
2513 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
2514 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
2515 };
2517 let preimage_opt = match htlc.state {
2518 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
2519 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
2520 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
2521 _ => None,
2522 };
2524 if let Some(preimage) = preimage_opt {
2525 outbound_htlc_preimages.push(preimage);
2526 }
2528 if include {
2529 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
2530 local_htlc_total_msat += htlc.amount_msat;
2531 } else {
2532 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
2533 match htlc.state {
2534 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
2535 value_to_self_msat_offset -= htlc.amount_msat as i64;
2537 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
2538 if !generated_by_local {
2539 value_to_self_msat_offset -= htlc.amount_msat as i64;
2547 let value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
2548 assert!(value_to_self_msat >= 0);
2549 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
2550 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
2551 // "violate" their reserve value by couting those against it. Thus, we have to convert
2552 // everything to i64 before subtracting as otherwise we can overflow.
2553 let value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
2554 assert!(value_to_remote_msat >= 0);
2556 #[cfg(debug_assertions)]
2557 {
2558 // Make sure that the to_self/to_remote is always either past the appropriate
2559 // channel_reserve *or* it is making progress towards it.
2560 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
2561 self.holder_max_commitment_tx_output.lock().unwrap()
2562 } else {
2563 self.counterparty_max_commitment_tx_output.lock().unwrap()
2564 };
2565 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
2566 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
2567 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
2568 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
2571 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
2572 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
2573 let (value_to_self, value_to_remote) = if self.is_outbound() {
2574 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
2575 } else {
2576 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
2577 };
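// Worked example with hypothetical numbers (not taken from this codebase): on an outbound
// non-anchor channel with value_to_self_msat = 600_000_000, value_to_remote_msat = 400_000_000,
// total_fee_sat = 2_670 and anchors_val = 0, the funder (us) bears the fee, so
// value_to_self = 600_000 - 0 - 2_670 = 597_330 sats while value_to_remote stays 400_000 sats.
// With anchors, anchors_val = ANCHOR_OUTPUT_VALUE_SATOSHI * 2 is deducted from the funder as well.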
2579 let mut value_to_a = if local { value_to_self } else { value_to_remote };
2580 let mut value_to_b = if local { value_to_remote } else { value_to_self };
2581 let (funding_pubkey_a, funding_pubkey_b) = if local {
2582 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
2583 } else {
2584 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
2585 };
2587 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
2588 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
2589 } else {
2590 value_to_a = 0;
2591 }
2593 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
2594 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
2595 } else {
2596 value_to_b = 0;
2597 }
2599 let num_nondust_htlcs = included_non_dust_htlcs.len();
2601 let channel_parameters =
2602 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
2603 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
2604 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
2605 value_to_a as u64,
2606 value_to_b as u64,
2607 funding_pubkey_a,
2608 funding_pubkey_b,
2609 keys.clone(),
2610 feerate_per_kw,
2611 &mut included_non_dust_htlcs,
2612 &channel_parameters
2613 );
2614 let mut htlcs_included = included_non_dust_htlcs;
2615 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
2616 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
2617 htlcs_included.append(&mut included_dust_htlcs);
2619 CommitmentStats {
2620 tx,
2621 feerate_per_kw,
2622 total_fee_sat,
2623 num_nondust_htlcs,
2624 htlcs_included,
2625 local_balance_msat: value_to_self_msat as u64,
2626 remote_balance_msat: value_to_remote_msat as u64,
2627 inbound_htlc_preimages,
2628 outbound_htlc_preimages,
2633 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
2634 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
2635 /// our counterparty!)
2636 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
2637 /// TODO Some magic rust shit to compile-time check this?
2638 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
2639 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
2640 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
2641 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
2642 let counterparty_pubkeys = self.get_counterparty_pubkeys();
2644 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
2648 /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
2649 /// will sign and send to our counterparty.
2650 /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
2651 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
2652 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
2653 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
2654 let counterparty_pubkeys = self.get_counterparty_pubkeys();
2656 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
2659 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
2660 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
2661 /// Panics if called before accept_channel/InboundV1Channel::new
2662 pub fn get_funding_redeemscript(&self) -> ScriptBuf {
2663 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
2666 fn counterparty_funding_pubkey(&self) -> &PublicKey {
2667 &self.get_counterparty_pubkeys().funding_pubkey
2670 pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
2671 self.feerate_per_kw
2672 }
2674 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
2675 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
2676 // may, at any point, increase by at least 10 sat/vB (i.e 2530 sat/kWU) or 25%,
2677 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
2678 // more dust balance if the feerate increases when we have several HTLCs pending
2679 // which are near the dust limit.
2680 let mut feerate_per_kw = self.feerate_per_kw;
2681 // If there's a pending update fee, use it to ensure we aren't under-estimating
2682 // potential feerate updates coming soon.
2683 if let Some((feerate, _)) = self.pending_update_fee {
2684 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
2686 if let Some(feerate) = outbound_feerate_update {
2687 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
2689 let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
2690 cmp::max(2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
2691 }
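// Worked example with hypothetical feerates: if feerate_per_kw is 1_000 and no updates are
// pending, feerate_plus_quarter = 1_000 * 1250 / 1000 = 1_250 and we return max(2530, 1250) =
// 2_530. If feerate_per_kw is 10_000, we return max(2530, 12_500) = 12_500; the 25% buffer
// dominates once feerate_per_kw * 1.25 exceeds 2_530.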
2693 /// Get forwarding information for the counterparty.
2694 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
2695 self.counterparty_forwarding_info.clone()
2698 /// Returns a HTLCStats about inbound pending htlcs
2699 fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
2700 let context = &self;
2701 let mut stats = HTLCStats {
2702 pending_htlcs: context.pending_inbound_htlcs.len() as u32,
2703 pending_htlcs_value_msat: 0,
2704 on_counterparty_tx_dust_exposure_msat: 0,
2705 on_holder_tx_dust_exposure_msat: 0,
2706 holding_cell_msat: 0,
2707 on_holder_tx_holding_cell_htlcs_count: 0,
2710 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2711 (0, 0)
2712 } else {
2713 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
2714 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
2715 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
2716 };
2717 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
2718 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
2719 for ref htlc in context.pending_inbound_htlcs.iter() {
2720 stats.pending_htlcs_value_msat += htlc.amount_msat;
2721 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
2722 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
2724 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
2725 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
2726 }
2727 }
2728 stats
2729 }
2731 /// Returns a HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell.
2732 fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
2733 let context = &self;
2734 let mut stats = HTLCStats {
2735 pending_htlcs: context.pending_outbound_htlcs.len() as u32,
2736 pending_htlcs_value_msat: 0,
2737 on_counterparty_tx_dust_exposure_msat: 0,
2738 on_holder_tx_dust_exposure_msat: 0,
2739 holding_cell_msat: 0,
2740 on_holder_tx_holding_cell_htlcs_count: 0,
2743 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2744 (0, 0)
2745 } else {
2746 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
2747 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
2748 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
2749 };
2750 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
2751 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
2752 for ref htlc in context.pending_outbound_htlcs.iter() {
2753 stats.pending_htlcs_value_msat += htlc.amount_msat;
2754 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
2755 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
2757 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
2758 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
2762 for update in context.holding_cell_htlc_updates.iter() {
2763 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
2764 stats.pending_htlcs += 1;
2765 stats.pending_htlcs_value_msat += amount_msat;
2766 stats.holding_cell_msat += amount_msat;
2767 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
2768 stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
2770 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
2771 stats.on_holder_tx_dust_exposure_msat += amount_msat;
2772 } else {
2773 stats.on_holder_tx_holding_cell_htlcs_count += 1;
2774 }
2775 }
2776 }
2777 stats
2778 }
2780 /// Returns information on all pending inbound HTLCs.
2781 pub fn get_pending_inbound_htlc_details(&self) -> Vec<InboundHTLCDetails> {
2782 let mut holding_cell_states = new_hash_map();
2783 for holding_cell_update in self.holding_cell_htlc_updates.iter() {
2784 match holding_cell_update {
2785 HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2786 holding_cell_states.insert(
2788 InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill,
2791 HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2792 holding_cell_states.insert(
2794 InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail,
2797 HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } => {
2798 holding_cell_states.insert(
2800 InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail,
2804 HTLCUpdateAwaitingACK::AddHTLC { .. } => {},
2807 let mut inbound_details = Vec::new();
2808 let htlc_success_dust_limit = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2809 0
2810 } else {
2811 let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
2812 dust_buffer_feerate * htlc_success_tx_weight(self.get_channel_type()) / 1000
2813 };
2814 let holder_dust_limit_success_sat = htlc_success_dust_limit + self.holder_dust_limit_satoshis;
2815 for htlc in self.pending_inbound_htlcs.iter() {
2816 if let Some(state_details) = (&htlc.state).into() {
2817 inbound_details.push(InboundHTLCDetails{
2818 htlc_id: htlc.htlc_id,
2819 amount_msat: htlc.amount_msat,
2820 cltv_expiry: htlc.cltv_expiry,
2821 payment_hash: htlc.payment_hash,
2822 state: Some(holding_cell_states.remove(&htlc.htlc_id).unwrap_or(state_details)),
2823 is_dust: htlc.amount_msat / 1000 < holder_dust_limit_success_sat,
2830 /// Returns information on all pending outbound HTLCs.
2831 pub fn get_pending_outbound_htlc_details(&self) -> Vec<OutboundHTLCDetails> {
2832 let mut outbound_details = Vec::new();
2833 let htlc_timeout_dust_limit = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2834 0
2835 } else {
2836 let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
2837 dust_buffer_feerate * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
2838 };
2839 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.holder_dust_limit_satoshis;
2840 for htlc in self.pending_outbound_htlcs.iter() {
2841 outbound_details.push(OutboundHTLCDetails{
2842 htlc_id: Some(htlc.htlc_id),
2843 amount_msat: htlc.amount_msat,
2844 cltv_expiry: htlc.cltv_expiry,
2845 payment_hash: htlc.payment_hash,
2846 skimmed_fee_msat: htlc.skimmed_fee_msat,
2847 state: Some((&htlc.state).into()),
2848 is_dust: htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat,
2851 for holding_cell_update in self.holding_cell_htlc_updates.iter() {
2852 if let HTLCUpdateAwaitingACK::AddHTLC {
2853 amount_msat,
2854 cltv_expiry,
2855 payment_hash,
2856 skimmed_fee_msat,
2857 ..
2858 } = *holding_cell_update {
2859 outbound_details.push(OutboundHTLCDetails{
2860 htlc_id: None,
2861 amount_msat: amount_msat,
2862 cltv_expiry: cltv_expiry,
2863 payment_hash: payment_hash,
2864 skimmed_fee_msat: skimmed_fee_msat,
2865 state: Some(OutboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
2866 is_dust: amount_msat / 1000 < holder_dust_limit_timeout_sat,
2873 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
2874 /// Doesn't bother handling the
2875 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
2876 /// corner case properly.
2877 pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
2878 -> AvailableBalances
2879 where F::Target: FeeEstimator
2881 let context = &self;
2882 // Note that we have to handle overflow due to the above case.
2883 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
2884 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
2886 let mut balance_msat = context.value_to_self_msat;
2887 for ref htlc in context.pending_inbound_htlcs.iter() {
2888 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
2889 balance_msat += htlc.amount_msat;
2892 balance_msat -= outbound_stats.pending_htlcs_value_msat;
2894 let outbound_capacity_msat = context.value_to_self_msat
2895 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
2896 .saturating_sub(
2897 context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
2899 let mut available_capacity_msat = outbound_capacity_msat;
2901 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2902 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
2903 } else {
2904 0
2905 };
2906 if context.is_outbound() {
2907 // We should mind channel commit tx fee when computing how much of the available capacity
2908 // can be used in the next htlc. Mirrors the logic in send_htlc.
2910 // The fee depends on whether the amount we will be sending is above dust or not,
2911 // and the answer will in turn change the amount itself — making it a circular
2912 // dependency.
2913 // This complicates the computation around dust-values, up to the one-htlc-value.
2914 let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
2915 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2916 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
2919 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
2920 let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
2921 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
2922 let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
2923 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2924 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2925 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2928 // We will first subtract the fee as if we were above-dust. Then, if the resulting
2929 // value ends up being below dust, we have this fee available again. In that case,
2930 // match the value to right-below-dust.
2931 let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
2932 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
2933 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
2934 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
2935 debug_assert!(one_htlc_difference_msat != 0);
2936 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
2937 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
2938 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
2939 } else {
2940 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
2941 }
2942 } else {
2943 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
2944 // sending a new HTLC won't reduce their balance below our reserve threshold.
2945 let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
2946 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2947 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
2950 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
2951 let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
2953 let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
2954 let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
2955 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
2957 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
2958 // If another HTLC's fee would reduce the remote's balance below the reserve limit
2959 // we've selected for them, we can only send dust HTLCs.
2960 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
2964 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
2966 // If we get close to our maximum dust exposure, we end up in a situation where we can send
2967 // between zero and the remaining dust exposure limit OR above the dust limit.
2968 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
2969 // send above the dust limit (as the router can always overpay to meet the dust limit).
2970 let mut remaining_msat_below_dust_exposure_limit = None;
2971 let mut dust_exposure_dust_limit_msat = 0;
2972 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
2974 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2975 (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
2976 } else {
2977 let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
2978 (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2979 context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2980 };
2981 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
2982 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2983 remaining_msat_below_dust_exposure_limit =
2984 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
2985 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
2988 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
2989 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2990 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
2991 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
2992 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
2993 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
2996 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
2997 if available_capacity_msat < dust_exposure_dust_limit_msat {
2998 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
2999 } else {
3000 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
3004 available_capacity_msat = cmp::min(available_capacity_msat,
3005 context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
3007 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
3008 available_capacity_msat = 0;
3009 }
3011 AvailableBalances {
3012 inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
3013 - context.value_to_self_msat as i64
3014 - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
3015 - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
3016 0) as u64,
3017 outbound_capacity_msat,
3018 next_outbound_htlc_limit_msat: available_capacity_msat,
3019 next_outbound_htlc_minimum_msat,
3020 balance_msat,
3021 }
3022 }
3024 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
3025 let context = &self;
3026 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
3029 /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
3030 /// number of pending HTLCs that are on track to be in our next commitment tx.
3032 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
3033 /// `fee_spike_buffer_htlc` is `Some`.
3035 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
3036 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
3038 /// Dust HTLCs are excluded.
3039 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
3040 let context = &self;
3041 assert!(context.is_outbound());
3043 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3044 (0, 0)
3045 } else {
3046 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
3047 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
3048 };
3049 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
3050 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
3052 let mut addl_htlcs = 0;
3053 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
3054 match htlc.origin {
3055 HTLCInitiator::LocalOffered => {
3056 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
3057 addl_htlcs += 1;
3058 }
3059 },
3060 HTLCInitiator::RemoteOffered => {
3061 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
3062 addl_htlcs += 1;
3063 }
3064 },
3065 }
3067 let mut included_htlcs = 0;
3068 for ref htlc in context.pending_inbound_htlcs.iter() {
3069 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
3070 continue
3071 }
3072 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
3073 // transaction including this HTLC if it times out before they RAA.
3074 included_htlcs += 1;
3077 for ref htlc in context.pending_outbound_htlcs.iter() {
3078 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
3079 continue
3080 }
3081 match htlc.state {
3082 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
3083 OutboundHTLCState::Committed => included_htlcs += 1,
3084 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
3085 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
3086 // transaction won't be generated until they send us their next RAA, which will mean
3087 // dropping any HTLCs in this state.
3088 _ => {},
3089 }
3090 }
3092 for htlc in context.holding_cell_htlc_updates.iter() {
3094 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
3095 if amount_msat / 1000 < real_dust_limit_timeout_sat {
3100 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
3101 // ack we're guaranteed to never include them in commitment txs anymore.
3105 let num_htlcs = included_htlcs + addl_htlcs;
3106 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
3107 #[cfg(any(test, fuzzing))]
3108 {
3109 let mut fee = res;
3110 if fee_spike_buffer_htlc.is_some() {
3111 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
3113 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
3114 + context.holding_cell_htlc_updates.len();
3115 let commitment_tx_info = CommitmentTxInfoCached {
3117 total_pending_htlcs,
3118 next_holder_htlc_id: match htlc.origin {
3119 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
3120 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
3122 next_counterparty_htlc_id: match htlc.origin {
3123 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
3124 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
3126 feerate: context.feerate_per_kw,
3128 *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
3129 }
3130 res
3131 }
3133 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
3134 /// pending HTLCs that are on track to be in their next commitment tx
3136 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
3137 /// `fee_spike_buffer_htlc` is `Some`.
3139 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
3140 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
3142 /// Dust HTLCs are excluded.
3143 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
3144 let context = &self;
3145 assert!(!context.is_outbound());
3147 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3148 (0, 0)
3149 } else {
3150 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
3151 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
3152 };
3153 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
3154 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
3156 let mut addl_htlcs = 0;
3157 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
3158 match htlc.origin {
3159 HTLCInitiator::LocalOffered => {
3160 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
3161 addl_htlcs += 1;
3162 }
3163 },
3164 HTLCInitiator::RemoteOffered => {
3165 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
3166 addl_htlcs += 1;
3167 }
3168 },
3169 }
3171 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
3172 // non-dust inbound HTLCs are included (as all states imply it will be included) and only
3173 // committed outbound HTLCs, see below.
3174 let mut included_htlcs = 0;
3175 for ref htlc in context.pending_inbound_htlcs.iter() {
3176 if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
3177 continue
3178 }
3179 included_htlcs += 1;
3182 for ref htlc in context.pending_outbound_htlcs.iter() {
3183 if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
3184 continue
3185 }
3186 // We only include outbound HTLCs if they will not be included in their next commitment_signed,
3187 // i.e. if they've responded to us with an RAA after announcement.
3188 match htlc.state {
3189 OutboundHTLCState::Committed => included_htlcs += 1,
3190 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
3191 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
3192 _ => {},
3193 }
3194 }
3196 let num_htlcs = included_htlcs + addl_htlcs;
3197 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
3198 #[cfg(any(test, fuzzing))]
3199 {
3200 let mut fee = res;
3201 if fee_spike_buffer_htlc.is_some() {
3202 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
3204 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
3205 let commitment_tx_info = CommitmentTxInfoCached {
3207 total_pending_htlcs,
3208 next_holder_htlc_id: match htlc.origin {
3209 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
3210 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
3212 next_counterparty_htlc_id: match htlc.origin {
3213 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
3214 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
3216 feerate: context.feerate_per_kw,
3218 *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
3219 }
3220 res
3221 }
3223 fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O> where F: Fn() -> Option<O> {
3224 match self.channel_state {
3225 ChannelState::FundingNegotiated => f(),
3226 ChannelState::AwaitingChannelReady(flags) =>
3227 if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) ||
3228 flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into())
3229 {
3230 f()
3231 } else {
3232 None
3233 },
3234 _ => None,
3235 }
3236 }
3238 /// Returns the transaction if there is a pending funding transaction that is yet to be
3239 /// broadcast.
3240 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
3241 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
3244 /// Returns the transaction ID if there is a pending funding transaction that is yet to be
3245 /// broadcast.
3246 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
3247 self.if_unbroadcasted_funding(||
3248 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
3252 /// Returns whether the channel is funded in a batch.
3253 pub fn is_batch_funding(&self) -> bool {
3254 self.is_batch_funding.is_some()
3257 /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
3258 /// broadcast.
3259 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
3260 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
3263 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
3264 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
3265 /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
3266 /// Also returns the list of payment_hashes for channels which we can safely fail backwards
3267 /// immediately (others we will have to allow to time out).
3268 pub fn force_shutdown(&mut self, should_broadcast: bool, closure_reason: ClosureReason) -> ShutdownResult {
3269 // Note that we MUST only generate a monitor update that indicates force-closure - we're
3270 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
3271 // being fully configured in some cases. Thus, it's likely any monitor events we generate will
3272 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
3273 assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete));
3275 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
3276 // return them to fail the payment.
3277 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
3278 let counterparty_node_id = self.get_counterparty_node_id();
3279 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
3280 match htlc_update {
3281 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
3282 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
3287 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
3288 // If we haven't yet exchanged funding signatures (ie channel_state < AwaitingChannelReady),
3289 // returning a channel monitor update here would imply a channel monitor update before
3290 // we even registered the channel monitor to begin with, which is invalid.
3291 // Thus, if we aren't actually at a point where we could conceivably broadcast the
3292 // funding transaction, don't return a funding txo (which prevents providing the
3293 // monitor update to the user, even if we return one).
3294 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
3295 if !self.channel_state.is_pre_funded_state() {
3296 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
3297 Some((self.get_counterparty_node_id(), funding_txo, self.channel_id(), ChannelMonitorUpdate {
3298 update_id: self.latest_monitor_update_id,
3299 counterparty_node_id: Some(self.counterparty_node_id),
3300 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
3301 channel_id: Some(self.channel_id()),
3302 }))
3303 } else { None }
3304 } else { None };
3305 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
3306 let unbroadcasted_funding_tx = self.unbroadcasted_funding();
3308 self.channel_state = ChannelState::ShutdownComplete;
3309 self.update_time_counter += 1;
3311 ShutdownResult {
3312 monitor_update,
3313 dropped_outbound_htlcs,
3314 unbroadcasted_batch_funding_txid,
3315 channel_id: self.channel_id,
3316 user_channel_id: self.user_id,
3317 channel_capacity_satoshis: self.channel_value_satoshis,
3318 counterparty_node_id: self.counterparty_node_id,
3319 unbroadcasted_funding_tx,
3320 channel_funding_txo: self.get_funding_txo(),
3324 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
3325 fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
3326 let counterparty_keys = self.build_remote_transaction_keys();
3327 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
3329 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
3330 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
3331 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
3332 &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
3334 match &self.holder_signer {
3335 // TODO (arik): move match into calling method for Taproot
3336 ChannelSignerType::Ecdsa(ecdsa) => {
3337 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
3338 .map(|(signature, _)| msgs::FundingSigned {
3339 channel_id: self.channel_id(),
3340 signature,
3341 #[cfg(taproot)]
3342 partial_signature_with_nonce: None,
3343 })
3344 .ok();
3346 if funding_signed.is_none() {
3347 #[cfg(not(async_signing))] {
3348 panic!("Failed to get signature for funding_signed");
3350 #[cfg(async_signing)] {
3351 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
3352 self.signer_pending_funding = true;
3354 } else if self.signer_pending_funding {
3355 log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
3356 self.signer_pending_funding = false;
3359 // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
3360 (counterparty_initial_commitment_tx, funding_signed)
3362 // TODO (taproot|arik)
3369 // Internal utility functions for channels
3371 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
3372 /// `channel_value_satoshis` in msat, set through
3373 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
3375 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
3377 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
3378 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
3379 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
3380 1
3381 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
3382 100
3383 } else {
3384 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
3385 };
3386 channel_value_satoshis * 10 * configured_percent
3387 }
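// Worked example with hypothetical values: for a 1_000_000 sat channel configured with
// max_inbound_htlc_value_in_flight_percent_of_channel = 10, this returns
// 1_000_000 * 10 * 10 = 100_000_000 msat, i.e. 10% of the channel value.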
3389 /// Returns a minimum channel reserve value the remote needs to maintain,
3390 /// required by us according to the configured or default
3391 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
3393 /// Guaranteed to return a value no larger than channel_value_satoshis
3395 /// This is used both for outbound and inbound channels and has lower bound
3396 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
3397 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
3398 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
3399 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
3400 }
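// Worked example with hypothetical values (assuming MIN_THEIR_CHAN_RESERVE_SATOSHIS is 1_000):
// a 1_000_000 sat channel with their_channel_reserve_proportional_millionths = 10_000 (1%)
// gives calculated_reserve = 10_000 sats, which is returned unchanged. A 20_000 sat channel with
// the same config gives calculated_reserve = 200 sats, which is floored to 1_000 sats.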
3402 /// This is for legacy reasons, present for forward-compatibility.
3403 /// LDK versions older than 0.0.104 don't know how to read/handle values other than default
3404 /// from storage. Hence, we use this function to not persist default values of
3405 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
3406 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
3407 let (q, _) = channel_value_satoshis.overflowing_div(100);
3408 cmp::min(channel_value_satoshis, cmp::max(q, 1000))
3411 /// Returns a minimum channel reserve value each party needs to maintain, fixed in the spec to a
3412 /// default of 1% of the total channel value.
3414 /// Guaranteed to return a value no larger than channel_value_satoshis
3416 /// This is used both for outbound and inbound channels and has lower bound
3417 /// of `dust_limit_satoshis`.
3418 fn get_v2_channel_reserve_satoshis(channel_value_satoshis: u64, dust_limit_satoshis: u64) -> u64 {
3419 // Fixed at 1% of channel value by spec.
3420 let (q, _) = channel_value_satoshis.overflowing_div(100);
3421 cmp::min(channel_value_satoshis, cmp::max(q, dust_limit_satoshis))
3422 }
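// Worked example with hypothetical values: a 1_000_000 sat channel with a 354 sat dust limit
// gives q = 10_000 and returns 10_000 sats, while a 30_000 sat channel gives q = 300, which is
// floored to the 354 sat dust limit.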
3424 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
3425 // Note that num_htlcs should not include dust HTLCs.
3427 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
3428 feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
3431 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
3432 // Note that num_htlcs should not include dust HTLCs.
3433 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
3434 // Note that we need to divide before multiplying to round properly,
3435 // since the lowest denomination of bitcoin on-chain is the satoshi.
3436 (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
3437 }
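// Worked example with hypothetical numbers (assuming a non-anchor commitment base weight of
// 724 WU and COMMITMENT_TX_WEIGHT_PER_HTLC = 172 WU): at feerate_per_kw = 2_500 with two
// non-dust HTLCs the weight is 724 + 2 * 172 = 1_068 WU, so commit_tx_fee_sat returns
// 2_500 * 1_068 / 1000 = 2_670 sats and commit_tx_fee_msat returns 2_670_000 msat. The msat
// variant divides by 1000 before multiplying by 1000, so it always rounds down to a whole satoshi.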
3439 /// Context for dual-funded channels.
3440 #[cfg(dual_funding)]
3441 pub(super) struct DualFundingChannelContext {
3442 /// The amount in satoshis we will be contributing to the channel.
3443 pub our_funding_satoshis: u64,
3444 /// The amount in satoshis our counterparty will be contributing to the channel.
3445 pub their_funding_satoshis: u64,
3446 /// The funding transaction locktime suggested by the initiator. If set by us, it is always set
3447 /// to the current block height to align incentives against fee-sniping.
3448 pub funding_tx_locktime: u32,
3449 /// The feerate set by the initiator to be used for the funding transaction.
3450 pub funding_feerate_sat_per_1000_weight: u32,
3453 // Holder designates channel data owned for the benefit of the user client.
3454 // Counterparty designates channel data owned by the other channel participant entity.
3455 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
3456 pub context: ChannelContext<SP>,
3457 #[cfg(dual_funding)]
3458 pub dual_funding_channel_context: Option<DualFundingChannelContext>,
3461 #[cfg(any(test, fuzzing))]
3462 struct CommitmentTxInfoCached {
3464 total_pending_htlcs: usize,
3465 next_holder_htlc_id: u64,
3466 next_counterparty_htlc_id: u64,
3470 /// Contents of a wire message that fails an HTLC backwards. Useful for [`Channel::fail_htlc`] to
3471 /// fail with either [`msgs::UpdateFailMalformedHTLC`] or [`msgs::UpdateFailHTLC`] as needed.
3472 trait FailHTLCContents {
3473 type Message: FailHTLCMessageName;
3474 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message;
3475 fn to_inbound_htlc_state(self) -> InboundHTLCState;
3476 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK;
3478 impl FailHTLCContents for msgs::OnionErrorPacket {
3479 type Message = msgs::UpdateFailHTLC;
3480 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
3481 msgs::UpdateFailHTLC { htlc_id, channel_id, reason: self }
3483 fn to_inbound_htlc_state(self) -> InboundHTLCState {
3484 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(self))
3486 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
3487 HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet: self }
3490 impl FailHTLCContents for ([u8; 32], u16) {
3491 type Message = msgs::UpdateFailMalformedHTLC;
3492 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
3493 msgs::UpdateFailMalformedHTLC {
3496 sha256_of_onion: self.0,
3497 failure_code: self.1
3500 fn to_inbound_htlc_state(self) -> InboundHTLCState {
3501 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(self))
3503 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
3504 HTLCUpdateAwaitingACK::FailMalformedHTLC {
3506 sha256_of_onion: self.0,
3507 failure_code: self.1
3512 trait FailHTLCMessageName {
3513 fn name() -> &'static str;
3515 impl FailHTLCMessageName for msgs::UpdateFailHTLC {
3516 fn name() -> &'static str {
3520 impl FailHTLCMessageName for msgs::UpdateFailMalformedHTLC {
3521 fn name() -> &'static str {
3522 "update_fail_malformed_htlc"
3526 impl<SP: Deref> Channel<SP> where
3527 SP::Target: SignerProvider,
3528 <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
3530 fn check_remote_fee<F: Deref, L: Deref>(
3531 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
3532 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
3533 ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
3535 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
3536 ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
3537 } else {
3538 ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
3539 };
3540 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
3541 if feerate_per_kw < lower_limit {
3542 if let Some(cur_feerate) = cur_feerate_per_kw {
3543 if feerate_per_kw > cur_feerate {
3545 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
3546 cur_feerate, feerate_per_kw);
3550 return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
3551 }
3552 Ok(())
3553 }
3556 fn get_closing_scriptpubkey(&self) -> ScriptBuf {
3557 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
3558 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
3559 // outside of those situations will fail.
3560 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
3564 fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
3565 let mut ret =
3566 (4 + // version
3567 1 + // input count
3568 36 + // prevout
3569 1 + // script length (0)
3570 4 + // sequence
3571 1 + // output count
3572 4 // lock time
3573 )*4 + // * 4 for non-witness parts
3574 2 + // witness marker and flag
3575 1 + // witness element count
3576 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
3577 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
3578 2*(1 + 71); // two signatures + sighash type flags
3579 if let Some(spk) = a_scriptpubkey {
3580 ret += ((8+1) + // output values and script length
3581 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
3583 if let Some(spk) = b_scriptpubkey {
3584 ret += ((8+1) + // output values and script length
3585 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
3586 }
3587 ret
3588 }
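// Worked example with hypothetical scripts: with the usual 71-byte 2-of-2 funding redeemscript
// the fixed portion above is (4+1+36+1+4+1+4)*4 + 2 + 1 + 4 + 71 + 144 = 426 WU, and if both
// sides use 22-byte P2WPKH shutdown scripts each output adds (8+1+22)*4 = 124 WU, for 674 WU total.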
3591 fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
3592 assert!(self.context.pending_inbound_htlcs.is_empty());
3593 assert!(self.context.pending_outbound_htlcs.is_empty());
3594 assert!(self.context.pending_update_fee.is_none());
3596 let mut total_fee_satoshis = proposed_total_fee_satoshis;
3597 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
3598 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
3600 if value_to_holder < 0 {
3601 assert!(self.context.is_outbound());
3602 total_fee_satoshis += (-value_to_holder) as u64;
3603 } else if value_to_counterparty < 0 {
3604 assert!(!self.context.is_outbound());
3605 total_fee_satoshis += (-value_to_counterparty) as u64;
3608 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
3609 value_to_counterparty = 0;
3612 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
3613 value_to_holder = 0;
3616 assert!(self.context.shutdown_scriptpubkey.is_some());
3617 let holder_shutdown_script = self.get_closing_scriptpubkey();
3618 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
3619 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
3621 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
3622 (closing_transaction, total_fee_satoshis)
3625 fn funding_outpoint(&self) -> OutPoint {
3626 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
3629 /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
3630 /// entirely.
3632 /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
3633 /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
3635 /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
3636 /// disconnected).
3637 pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
3638 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
3639 where L::Target: Logger {
3640 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
3641 // (see equivalent if condition there).
3642 assert!(!self.context.channel_state.can_generate_new_commitment());
3643 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
3644 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
3645 self.context.latest_monitor_update_id = mon_update_id;
3646 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
3647 assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
3651 fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
3652 // Either ChannelReady got set (which means it won't be unset) or there is no way any
3653 // caller thought we could have something claimed (cause we wouldn't have accepted it in an
3654 // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
3656 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3657 panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
3660 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
3661 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
3662 // these, but for now we just have to treat them as normal.
3664 let mut pending_idx = core::usize::MAX;
3665 let mut htlc_value_msat = 0;
3666 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
3667 if htlc.htlc_id == htlc_id_arg {
3668 debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
3669 log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
3670 htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
3672 InboundHTLCState::Committed => {},
3673 InboundHTLCState::LocalRemoved(ref reason) => {
3674 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3676 log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
3677 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
3679 return UpdateFulfillFetch::DuplicateClaim {};
3682 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
3683 // Don't return in release mode here so that we can update channel_monitor
3687 htlc_value_msat = htlc.amount_msat;
3691 if pending_idx == core::usize::MAX {
3692 #[cfg(any(test, fuzzing))]
3693 // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
3694 // this is simply a duplicate claim, not previously failed and we lost funds.
3695 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
3696 return UpdateFulfillFetch::DuplicateClaim {};
3699 // Now update local state:
3701 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
3702 // can claim it even if the channel hits the chain before we see their next commitment.
3703 self.context.latest_monitor_update_id += 1;
3704 let monitor_update = ChannelMonitorUpdate {
3705 update_id: self.context.latest_monitor_update_id,
3706 counterparty_node_id: Some(self.context.counterparty_node_id),
3707 updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
3708 payment_preimage: payment_preimage_arg.clone(),
3710 channel_id: Some(self.context.channel_id()),
3713 if !self.context.channel_state.can_generate_new_commitment() {
3714 // Note that this condition is the same as the assertion in
3715 // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
3716 // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
3717 // do not get into this branch.
3718 for pending_update in self.context.holding_cell_htlc_updates.iter() {
3719 match pending_update {
3720 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
3721 if htlc_id_arg == htlc_id {
3722 // Make sure we don't leave latest_monitor_update_id incremented here:
3723 self.context.latest_monitor_update_id -= 1;
3724 #[cfg(any(test, fuzzing))]
3725 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
3726 return UpdateFulfillFetch::DuplicateClaim {};
3729 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
3730 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
3732 if htlc_id_arg == htlc_id {
3733 log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
3734 // TODO: We may actually be able to switch to a fulfill here, though it's
3735 // rare enough it may not be worth the complexity burden.
3736 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
3737 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
3743 log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32());
3744 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
3745 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
3747 #[cfg(any(test, fuzzing))]
3748 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
3749 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
3751 #[cfg(any(test, fuzzing))]
3752 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
3755 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
3756 if let InboundHTLCState::Committed = htlc.state {
3757 } else {
3758 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
3759 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
3761 log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
3762 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
3765 UpdateFulfillFetch::NewClaim {
3766 monitor_update,
3767 htlc_value_msat,
3768 msg: Some(msgs::UpdateFulfillHTLC {
3769 channel_id: self.context.channel_id(),
3770 htlc_id: htlc_id_arg,
3771 payment_preimage: payment_preimage_arg,
3776 pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
3777 let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
3778 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
3779 UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
3780 // Even if we aren't supposed to let new monitor updates with commitment state
3781 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
3782 // matter what. Sadly, to push a new monitor update which flies before others
3783 // already queued, we have to insert it into the pending queue and update the
3784 // update_ids of all the following monitors.
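// Illustrative sketch of the renumbering described above (hypothetical IDs): if the
// blocked queue currently holds updates with IDs 8 and 9, the preimage update built here
// takes over ID 8 so it can be applied ahead of them, and every blocked update is shifted
// up by one, becoming 9 and 10.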
3785 if release_cs_monitor && msg.is_some() {
3786 let mut additional_update = self.build_commitment_no_status_check(logger);
3787 // build_commitment_no_status_check may bump latest_monitor_id but we want them
3788 // to be strictly increasing by one, so decrement it here.
3789 self.context.latest_monitor_update_id = monitor_update.update_id;
3790 monitor_update.updates.append(&mut additional_update.updates);
3792 let new_mon_id = self.context.blocked_monitor_updates.get(0)
3793 .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
3794 monitor_update.update_id = new_mon_id;
3795 for held_update in self.context.blocked_monitor_updates.iter_mut() {
3796 held_update.update.update_id += 1;
3799 debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
3800 let update = self.build_commitment_no_status_check(logger);
3801 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
3807 self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
3808 UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
3810 UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
3814 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
3815 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
3816 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
3817 /// before we fail backwards.
3819 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
3820 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
3821 /// [`ChannelError::Ignore`].
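// A minimal usage sketch, assuming a `channel` in the `ChannelReady` state and an
// `err_packet` built elsewhere (both hypothetical names): the failure sits in the holding
// cell until the corresponding commitment update is generated, e.g. via
// `maybe_free_holding_cell_htlcs`:
//
//     channel.queue_fail_htlc(htlc_id, err_packet, &logger)?;
//     let (monitor_update_opt, failed_htlcs) =
//         channel.maybe_free_holding_cell_htlcs(&fee_estimator, &logger);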
3822 pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
3823 -> Result<(), ChannelError> where L::Target: Logger {
3824 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
3825 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
3828 /// Used for failing back with [`msgs::UpdateFailMalformedHTLC`]. For now, this is used when we
3829 /// want to fail blinded HTLCs where we are not the intro node.
3831 /// See [`Self::queue_fail_htlc`] for more info.
3832 pub fn queue_fail_malformed_htlc<L: Deref>(
3833 &mut self, htlc_id_arg: u64, failure_code: u16, sha256_of_onion: [u8; 32], logger: &L
3834 ) -> Result<(), ChannelError> where L::Target: Logger {
3835 self.fail_htlc(htlc_id_arg, (sha256_of_onion, failure_code), true, logger)
3836 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
3839 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
3840 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
3841 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
3842 /// before we fail backwards.
3844 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
3845 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
3846 /// [`ChannelError::Ignore`].
3847 fn fail_htlc<L: Deref, E: FailHTLCContents + Clone>(
3848 &mut self, htlc_id_arg: u64, err_contents: E, mut force_holding_cell: bool,
3850 ) -> Result<Option<E::Message>, ChannelError> where L::Target: Logger {
3851 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3852 panic!("Was asked to fail an HTLC when channel was not in an operational state");
3855 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
3856 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
3857 // these, but for now we just have to treat them as normal.
3859 let mut pending_idx = core::usize::MAX;
3860 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
3861 if htlc.htlc_id == htlc_id_arg {
3863 InboundHTLCState::Committed => {},
3864 InboundHTLCState::LocalRemoved(ref reason) => {
3865 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3867 debug_assert!(false, "Tried to fail an HTLC that was already failed");
3872 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
3873 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
3879 if pending_idx == core::usize::MAX {
3880 #[cfg(any(test, fuzzing))]
3881 // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
3882 // is simply a duplicate fail, not previously failed and we failed-back too early.
3883 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
3887 if !self.context.channel_state.can_generate_new_commitment() {
3888 debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
3889 force_holding_cell = true;
3892 // Now update local state:
3893 if force_holding_cell {
3894 for pending_update in self.context.holding_cell_htlc_updates.iter() {
3895 match pending_update {
3896 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
3897 if htlc_id_arg == htlc_id {
3898 #[cfg(any(test, fuzzing))]
3899 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
3903 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
3904 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
3906 if htlc_id_arg == htlc_id {
3907 debug_assert!(false, "Tried to fail an HTLC that was already failed");
3908 return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
3914 log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
3915 self.context.holding_cell_htlc_updates.push(err_contents.to_htlc_update_awaiting_ack(htlc_id_arg));
3919 log_trace!(logger, "Failing HTLC ID {} back with {} message in channel {}.", htlc_id_arg,
3920 E::Message::name(), &self.context.channel_id());
3922 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
3923 htlc.state = err_contents.clone().to_inbound_htlc_state();
3926 Ok(Some(err_contents.to_message(htlc_id_arg, self.context.channel_id())))
3929 // Message handlers:
3930 /// Updates the state of the channel to indicate that all channels in the batch have received
3931 /// funding_signed and persisted their monitors.
3932 /// The funding transaction is consequently allowed to be broadcast, and the channel can be
3933 /// treated as a non-batch channel going forward.
3934 pub fn set_batch_ready(&mut self) {
3935 self.context.is_batch_funding = None;
3936 self.context.channel_state.clear_waiting_for_batch();
3939 /// Unsets the existing funding information.
3941 /// This must only be used if the channel has not yet completed funding and has not been used.
3943 /// Further, the channel must be immediately shut down after this with a call to
3944 /// [`ChannelContext::force_shutdown`].
3945 pub fn unset_funding_info(&mut self, temporary_channel_id: ChannelId) {
3946 debug_assert!(matches!(
3947 self.context.channel_state, ChannelState::AwaitingChannelReady(_)
3949 self.context.channel_transaction_parameters.funding_outpoint = None;
3950 self.context.channel_id = temporary_channel_id;
3953 /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
3954 /// and the channel is now usable (and public), this may generate an announcement_signatures to send back.
3956 pub fn channel_ready<NS: Deref, L: Deref>(
3957 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
3958 user_config: &UserConfig, best_block: &BestBlock, logger: &L
3959 ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
3961 NS::Target: NodeSigner,
3964 if self.context.channel_state.is_peer_disconnected() {
3965 self.context.workaround_lnd_bug_4006 = Some(msg.clone());
3966 return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
3969 if let Some(scid_alias) = msg.short_channel_id_alias {
3970 if Some(scid_alias) != self.context.short_channel_id {
3971 // The scid alias provided can be used to route payments *from* our counterparty,
3972 // i.e. can be used for inbound payments and provided in invoices, but is not used
3973 // when routing outbound payments.
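// For example, a BOLT 11 invoice we generate for this channel can carry this alias as the
// `short_channel_id` in its route hint instead of the channel's real SCID.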
3974 self.context.latest_inbound_scid_alias = Some(scid_alias);
3978 // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
3979 // batch, but we can receive channel_ready messages.
3980 let mut check_reconnection = false;
3981 match &self.context.channel_state {
3982 ChannelState::AwaitingChannelReady(flags) => {
3983 let flags = flags.clone().clear(FundedStateFlags::ALL.into());
3984 debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
3985 if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
3986 // If we reconnected before sending our `channel_ready` they may still resend theirs.
3987 check_reconnection = true;
3988 } else if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
3989 self.context.channel_state.set_their_channel_ready();
3990 } else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
3991 self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
3992 self.context.update_time_counter += 1;
3994 // We're in `WAITING_FOR_BATCH`, so we should wait until we're ready.
3995 debug_assert!(flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
3998 // If we reconnected before sending our `channel_ready` they may still resend theirs.
3999 ChannelState::ChannelReady(_) => check_reconnection = true,
4000 _ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
4002 if check_reconnection {
4003 // They probably disconnected/reconnected and re-sent the channel_ready, which is
4004 // required, or they're sending a fresh SCID alias.
4005 let expected_point =
4006 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
4007 // If they haven't ever sent an updated point, the point they send should match the current one.
4009 self.context.counterparty_cur_commitment_point
4010 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
4011 // If we've advanced the commitment number once, the second commitment point is
4012 // at `counterparty_prev_commitment_point`, which is not yet revoked.
4013 debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
4014 self.context.counterparty_prev_commitment_point
4016 // If they have sent updated points, channel_ready is always supposed to match
4017 // their "first" point, which we re-derive here.
4018 Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
4019 &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
4020 ).expect("We already advanced, so previous secret keys should have been validated already")))
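// Illustrative reading of the branches above: writing N for INITIAL_COMMITMENT_NUMBER, a
// counterparty commitment number of N - 1 means they have never advanced their point, so
// the re-sent channel_ready must match `counterparty_cur_commitment_point`; at N - 2
// exactly one advance has happened, so it must match the not-yet-revoked
// `counterparty_prev_commitment_point`; anything lower and we re-derive their first point
// from the revealed per-commitment secret at index N - 1.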
4022 if expected_point != Some(msg.next_per_commitment_point) {
4023 return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
4028 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
4029 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
4031 log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
4033 Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
4036 pub fn update_add_htlc<F, FE: Deref, L: Deref>(
4037 &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
4038 create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
4039 ) -> Result<(), ChannelError>
4040 where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
4041 FE::Target: FeeEstimator, L::Target: Logger,
4043 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4044 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
4046 // We can't accept HTLCs sent after we've sent a shutdown.
4047 if self.context.channel_state.is_local_shutdown_sent() {
4048 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
4050 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
4051 if self.context.channel_state.is_remote_shutdown_sent() {
4052 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
4054 if self.context.channel_state.is_peer_disconnected() {
4055 return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
4057 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
4058 return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
4060 if msg.amount_msat == 0 {
4061 return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
4063 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
4064 return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
4067 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
4068 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
4069 if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
4070 return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
4072 if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
4073 return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
4076 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
4077 // the reserve_satoshis we told them to always have as direct payment so that they lose
4078 // something if we punish them for broadcasting an old state).
4079 // Note that we don't really care about having a small/no to_remote output in our local
4080 // commitment transactions, as the purpose of the channel reserve is to ensure we can
4081 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
4082 // present in the next commitment transaction we send them (at least for fulfilled ones,
4083 // failed ones won't modify value_to_self).
4084 // Note that we will send HTLCs which another instance of rust-lightning would think
4085 // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
4086 // Channel state once they will not be present in the next received commitment transaction).
4088 let mut removed_outbound_total_msat = 0;
4089 for ref htlc in self.context.pending_outbound_htlcs.iter() {
4090 if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
4091 removed_outbound_total_msat += htlc.amount_msat;
4092 } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
4093 removed_outbound_total_msat += htlc.amount_msat;
4097 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4098 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
4101 let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
4102 (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
4103 dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
4105 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
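// A rough worked example, using the BOLT 3 non-anchor HTLC transaction weights and purely
// illustrative feerate/dust values (none of these numbers come from this channel's config):
//
//     // dust_buffer_feerate = 2_500 sat/kW, htlc_timeout weight = 663, htlc_success = 703
//     // htlc_timeout_dust_limit = 2_500 * 663 / 1000 = 1_657 sats
//     // htlc_success_dust_limit = 2_500 * 703 / 1000 = 1_757 sats
//     // with counterparty_dust_limit_satoshis = 546:
//     // exposure_dust_limit_timeout_sats = 1_657 + 546 = 2_203 sats
//
// An inbound HTLC below that threshold is not rejected outright; it is counted toward our
// dust exposure on the counterparty's commitment transaction and only failed back if the
// total would exceed `max_dust_htlc_exposure_msat`, as checked below.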
4106 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
4107 let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
4108 if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
4109 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
4110 on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
4111 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
4115 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
4116 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
4117 let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
4118 if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
4119 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
4120 on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
4121 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
4125 let pending_value_to_self_msat =
4126 self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
4127 let pending_remote_value_msat =
4128 self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
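// Illustrative arithmetic for the two values above (made-up amounts): on a 1_000_000 sat
// channel with value_to_self_msat = 400_000_000, 50_000_000 msat of pending inbound HTLCs
// and 20_000_000 msat of outbound HTLCs already fulfilled but awaiting the counterparty's
// revocation:
//
//     // pending_value_to_self_msat = 400_000_000 + 50_000_000 - 20_000_000 = 430_000_000
//     // pending_remote_value_msat  = 1_000_000_000 - 430_000_000 = 570_000_000
//
// so the incoming HTLC must fit within 570_000_000 msat after the fee and reserve checks
// below.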
4129 if pending_remote_value_msat < msg.amount_msat {
4130 return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
4133 // Check that the remote can afford to pay for this HTLC on-chain at the current
4134 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
4136 let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
4137 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
4138 self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
4140 let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
4141 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
4145 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
4146 return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
4148 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
4149 return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
4153 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
4154 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
4158 if !self.context.is_outbound() {
4159 // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
4160 // the spec because the fee spike buffer requirement doesn't exist on the receiver's
4161 // side, only on the sender's. Note that with anchor outputs we are no longer as
4162 // sensitive to fee spikes, so we don't apply the extra buffer multiple below.
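// Hypothetical numbers to make the buffer concrete: if the counterparty's next commitment
// transaction fee including this HTLC would be 1_200_000 msat, then on a non-anchor
// channel we require them to keep FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE times that
// (e.g. 2_400_000 msat for a multiple of 2) on top of their reserve, so a later feerate
// increase cannot immediately make the channel unusable.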
4163 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
4164 let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
4165 if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
4166 remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
4168 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
4169 // Note that if the pending_forward_status is not updated here, then it's because we're already failing
4170 // the HTLC, i.e. its status is already set to failing.
4171 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
4172 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
4175 // Check that they won't violate our local required channel reserve by adding this HTLC.
4176 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
4177 let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
4178 if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
4179 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
4182 if self.context.next_counterparty_htlc_id != msg.htlc_id {
4183 return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
4185 if msg.cltv_expiry >= 500000000 {
4186 return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
4189 if self.context.channel_state.is_local_shutdown_sent() {
4190 if let PendingHTLCStatus::Forward(_) = pending_forward_status {
4191 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
4195 // Now update local state:
4196 self.context.next_counterparty_htlc_id += 1;
4197 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
4198 htlc_id: msg.htlc_id,
4199 amount_msat: msg.amount_msat,
4200 payment_hash: msg.payment_hash,
4201 cltv_expiry: msg.cltv_expiry,
4202 state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
4207 /// Marks an outbound HTLC for which we have received an update_fail/fulfill/malformed message.
4209 fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
4210 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
4211 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
4212 if htlc.htlc_id == htlc_id {
4213 let outcome = match check_preimage {
4214 None => fail_reason.into(),
4215 Some(payment_preimage) => {
4216 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
4217 if payment_hash != htlc.payment_hash {
4218 return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
4220 OutboundHTLCOutcome::Success(Some(payment_preimage))
4224 OutboundHTLCState::LocalAnnounced(_) =>
4225 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
4226 OutboundHTLCState::Committed => {
4227 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
4229 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
4230 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
4235 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
4238 pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64, Option<u64>), ChannelError> {
4239 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4240 return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
4242 if self.context.channel_state.is_peer_disconnected() {
4243 return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
4246 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat, htlc.skimmed_fee_msat))
4249 pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
4250 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4251 return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
4253 if self.context.channel_state.is_peer_disconnected() {
4254 return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
4257 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
4261 pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
4262 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4263 return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
4265 if self.context.channel_state.is_peer_disconnected() {
4266 return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
4269 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
4273 pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
4274 where L::Target: Logger
4276 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4277 return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
4279 if self.context.channel_state.is_peer_disconnected() {
4280 return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
4282 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
4283 return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
4286 let funding_script = self.context.get_funding_redeemscript();
4288 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
4290 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
4291 let commitment_txid = {
4292 let trusted_tx = commitment_stats.tx.trust();
4293 let bitcoin_tx = trusted_tx.built_transaction();
4294 let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
4296 log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
4297 log_bytes!(msg.signature.serialize_compact()[..]),
4298 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
4299 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
4300 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
4301 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
4305 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
4307 // If our counterparty updated the channel fee in this commitment transaction, check that
4308 // they can actually afford the new fee now.
4309 let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
4310 update_state == FeeUpdateState::RemoteAnnounced
4313 debug_assert!(!self.context.is_outbound());
4314 let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
4315 if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
4316 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
4319 #[cfg(any(test, fuzzing))]
4321 if self.context.is_outbound() {
4322 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
4323 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
4324 if let Some(info) = projected_commit_tx_info {
4325 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
4326 + self.context.holding_cell_htlc_updates.len();
4327 if info.total_pending_htlcs == total_pending_htlcs
4328 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
4329 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
4330 && info.feerate == self.context.feerate_per_kw {
4331 assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
4337 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
4338 return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
4341 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
4342 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
4343 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
4344 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
4345 // backwards compatibility, we never use it in production. To provide test coverage, here,
4346 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
4347 #[allow(unused_assignments, unused_mut)]
4348 let mut separate_nondust_htlc_sources = false;
4349 #[cfg(all(feature = "std", any(test, fuzzing)))] {
4350 use core::hash::{BuildHasher, Hasher};
4351 // Get a random value using the only std API to do so - the DefaultHasher
4352 let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
4353 separate_nondust_htlc_sources = rand_val % 2 == 0;
4356 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
4357 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
4358 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
4359 if let Some(_) = htlc.transaction_output_index {
4360 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
4361 self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
4362 &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
4364 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
4365 let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
4366 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
4367 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
4368 log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
4369 encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
4370 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
4371 return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
4373 if !separate_nondust_htlc_sources {
4374 htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
4377 htlcs_and_sigs.push((htlc, None, source_opt.take()));
4379 if separate_nondust_htlc_sources {
4380 if let Some(source) = source_opt.take() {
4381 nondust_htlc_sources.push(source);
4384 debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
4387 let holder_commitment_tx = HolderCommitmentTransaction::new(
4388 commitment_stats.tx,
4390 msg.htlc_signatures.clone(),
4391 &self.context.get_holder_pubkeys().funding_pubkey,
4392 self.context.counterparty_funding_pubkey()
4395 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
4396 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
4398 // Update state now that we've passed all the can-fail calls...
4399 let mut need_commitment = false;
4400 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
4401 if *update_state == FeeUpdateState::RemoteAnnounced {
4402 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
4403 need_commitment = true;
4407 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
4408 let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
4409 Some(forward_info.clone())
4411 if let Some(forward_info) = new_forward {
4412 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
4413 &htlc.payment_hash, &self.context.channel_id);
4414 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
4415 need_commitment = true;
4418 let mut claimed_htlcs = Vec::new();
4419 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
4420 if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
4421 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
4422 &htlc.payment_hash, &self.context.channel_id);
4423 // Grab the preimage, if it exists, instead of cloning
4424 let mut reason = OutboundHTLCOutcome::Success(None);
4425 mem::swap(outcome, &mut reason);
4426 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
4427 // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
4428 // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
4429 // have a `Success(None)` reason. In this case we could forget some HTLC
4430 // claims, but such an upgrade is unlikely and including claimed HTLCs here
4431 // fixes a bug which the user was exposed to on 0.0.104 when they started the upgrade.
4433 claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
4435 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
4436 need_commitment = true;
4440 self.context.latest_monitor_update_id += 1;
4441 let mut monitor_update = ChannelMonitorUpdate {
4442 update_id: self.context.latest_monitor_update_id,
4443 counterparty_node_id: Some(self.context.counterparty_node_id),
4444 updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
4445 commitment_tx: holder_commitment_tx,
4446 htlc_outputs: htlcs_and_sigs,
4448 nondust_htlc_sources,
4450 channel_id: Some(self.context.channel_id()),
4453 self.context.cur_holder_commitment_transaction_number -= 1;
4454 self.context.expecting_peer_commitment_signed = false;
4455 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
4456 // build_commitment_no_status_check() next which will reset this to RAAFirst.
4457 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
4459 if self.context.channel_state.is_monitor_update_in_progress() {
4460 // In case we initially failed monitor updating without requiring a response, we need
4461 // to make sure the RAA gets sent first.
4462 self.context.monitor_pending_revoke_and_ack = true;
4463 if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
4464 // If we were going to send a commitment_signed after the RAA, go ahead and do all
4465 // the corresponding HTLC status updates so that
4466 // get_last_commitment_update_for_send includes the right HTLCs.
4467 self.context.monitor_pending_commitment_signed = true;
4468 let mut additional_update = self.build_commitment_no_status_check(logger);
4469 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
4470 // strictly increasing by one, so decrement it here.
4471 self.context.latest_monitor_update_id = monitor_update.update_id;
4472 monitor_update.updates.append(&mut additional_update.updates);
4474 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
4475 &self.context.channel_id);
4476 return Ok(self.push_ret_blockable_mon_update(monitor_update));
4479 let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
4480 // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
4481 // we'll send one right away when we get the revoke_and_ack when we
4482 // free_holding_cell_htlcs().
4483 let mut additional_update = self.build_commitment_no_status_check(logger);
4484 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
4485 // strictly increasing by one, so decrement it here.
4486 self.context.latest_monitor_update_id = monitor_update.update_id;
4487 monitor_update.updates.append(&mut additional_update.updates);
4491 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
4492 &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
4493 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
4494 return Ok(self.push_ret_blockable_mon_update(monitor_update));
4497 /// Public version of the below, checking relevant preconditions first.
4498 /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
4499 /// returns `(None, Vec::new())`.
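// A minimal calling sketch (the surrounding `channel`, fee estimator and logger are
// hypothetical):
//
//     let (monitor_update_opt, failed_htlcs) =
//         channel.maybe_free_holding_cell_htlcs(&fee_estimator, &logger);
//     // Apply `monitor_update_opt` if it is `Some`, and fail each entry in
//     // `failed_htlcs` back to its source.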
4500 pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
4501 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
4502 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
4503 where F::Target: FeeEstimator, L::Target: Logger
4505 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.channel_state.can_generate_new_commitment() {
4506 self.free_holding_cell_htlcs(fee_estimator, logger)
4507 } else { (None, Vec::new()) }
4510 /// Frees any pending commitment updates in the holding cell, generating the relevant messages
4511 /// for our counterparty.
4512 fn free_holding_cell_htlcs<F: Deref, L: Deref>(
4513 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
4514 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
4515 where F::Target: FeeEstimator, L::Target: Logger
4517 assert!(!self.context.channel_state.is_monitor_update_in_progress());
4518 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
4519 log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
4520 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
4522 let mut monitor_update = ChannelMonitorUpdate {
4523 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
4524 counterparty_node_id: Some(self.context.counterparty_node_id),
4525 updates: Vec::new(),
4526 channel_id: Some(self.context.channel_id()),
4529 let mut htlc_updates = Vec::new();
4530 mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
4531 let mut update_add_count = 0;
4532 let mut update_fulfill_count = 0;
4533 let mut update_fail_count = 0;
4534 let mut htlcs_to_fail = Vec::new();
4535 for htlc_update in htlc_updates.drain(..) {
4536 // Note that this *can* fail, though it should be due to rather-rare conditions on
4537 // fee races with adding too many outputs which push our total payments just over
4538 // the limit. In case it's less rare than I anticipate, we may want to revisit
4539 // handling this case better and maybe fulfilling some of the HTLCs while attempting
4540 // to rebalance channels.
4541 let fail_htlc_res = match &htlc_update {
4542 &HTLCUpdateAwaitingACK::AddHTLC {
4543 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
4544 skimmed_fee_msat, blinding_point, ..
4546 match self.send_htlc(
4547 amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
4548 false, skimmed_fee_msat, blinding_point, fee_estimator, logger
4550 Ok(_) => update_add_count += 1,
4553 ChannelError::Ignore(ref msg) => {
4554 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
4555 // If we fail to send here, then this HTLC should
4556 // be failed backwards. Failing to send here
4557 // indicates that this HTLC may keep being put back
4558 // into the holding cell without ever being
4559 // successfully forwarded/failed/fulfilled, causing
4560 // our counterparty to eventually close on us.
4561 htlcs_to_fail.push((source.clone(), *payment_hash));
4564 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
4571 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
4572 // If an HTLC claim was previously added to the holding cell (via
4573 // `get_update_fulfill_htlc`), then generating the claim message itself must
4574 // not fail - any in between attempts to claim the HTLC will have resulted
4575 // in it hitting the holding cell again and we cannot change the state of a
4576 // holding cell HTLC from fulfill to anything else.
4577 let mut additional_monitor_update =
4578 if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
4579 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
4580 { monitor_update } else { unreachable!() };
4581 update_fulfill_count += 1;
4582 monitor_update.updates.append(&mut additional_monitor_update.updates);
4585 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
4586 Some(self.fail_htlc(htlc_id, err_packet.clone(), false, logger)
4587 .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
4589 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
4590 Some(self.fail_htlc(htlc_id, (sha256_of_onion, failure_code), false, logger)
4591 .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
4594 if let Some(res) = fail_htlc_res {
4596 Ok(fail_msg_opt) => {
4597 // If an HTLC failure was previously added to the holding cell (via
4598 // `queue_fail_{malformed_}htlc`) then generating the fail message itself must
4599 // not fail - we should never end up in a state where we double-fail
4600 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
4601 // for a full revocation before failing.
4602 debug_assert!(fail_msg_opt.is_some());
4603 update_fail_count += 1;
4605 Err(ChannelError::Ignore(_)) => {},
4607 panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
4612 if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
4613 return (None, htlcs_to_fail);
4615 let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
4616 self.send_update_fee(feerate, false, fee_estimator, logger)
4621 let mut additional_update = self.build_commitment_no_status_check(logger);
4622 // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
4623 // but we want them to be strictly increasing by one, so reset it here.
4624 self.context.latest_monitor_update_id = monitor_update.update_id;
4625 monitor_update.updates.append(&mut additional_update.updates);
4627 log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
4628 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
4629 update_add_count, update_fulfill_count, update_fail_count);
4631 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
4632 (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
4638 /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
4639 /// commitment_signed message here in case we had pending outbound HTLCs to add which were
4640 /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
4641 /// generating an appropriate error *after* the channel state has been updated based on the
4642 /// revoke_and_ack message.
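// A minimal calling sketch, with hypothetical surrounding values:
//
//     let (htlcs_to_fail, monitor_update_opt) =
//         channel.revoke_and_ack(&msg, &fee_estimator, &logger, false)?;
//     // `htlcs_to_fail` are holding-cell HTLCs that could not be freed and must be failed
//     // backwards; `monitor_update_opt` is `None` when the update was held back or blocked.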
4643 pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
4644 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
4645 ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
4646 where F::Target: FeeEstimator, L::Target: Logger,
4648 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4649 return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
4651 if self.context.channel_state.is_peer_disconnected() {
4652 return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
4654 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
4655 return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
4658 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
4660 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
4661 if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
4662 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
4666 if !self.context.channel_state.is_awaiting_remote_revoke() {
4667 // Our counterparty seems to have burned their coins to us (by revoking a state when we
4668 // haven't given them a new commitment transaction to broadcast). We should probably
4669 // take advantage of this by updating our channel monitor, sending them an error, and
4670 // waiting for them to broadcast their latest (now-revoked) claim. But, that would be a
4671 // lot of work, and there's some chance this is all a misunderstanding anyway.
4672 // We have to do *something*, though, since our signer may get mad at us for otherwise
4673 // jumping a remote commitment number, so best to just force-close and move on.
4674 return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
4677 #[cfg(any(test, fuzzing))]
4679 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
4680 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
4683 match &self.context.holder_signer {
4684 ChannelSignerType::Ecdsa(ecdsa) => {
4685 ecdsa.validate_counterparty_revocation(
4686 self.context.cur_counterparty_commitment_transaction_number + 1,
4688 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
4690 // TODO (taproot|arik)
4695 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
4696 .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
4697 self.context.latest_monitor_update_id += 1;
4698 let mut monitor_update = ChannelMonitorUpdate {
4699 update_id: self.context.latest_monitor_update_id,
4700 counterparty_node_id: Some(self.context.counterparty_node_id),
4701 updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
4702 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
4703 secret: msg.per_commitment_secret,
4705 channel_id: Some(self.context.channel_id()),
4708 // Update state now that we've passed all the can-fail calls...
4709 // (note that we may still fail to generate the new commitment_signed message, but that's
4710 // OK, we step the channel here and *then* if the new generation fails we can fail the
4711 // channel based on that, but stepping stuff here should be safe either way.)
4712 self.context.channel_state.clear_awaiting_remote_revoke();
4713 self.context.sent_message_awaiting_response = None;
4714 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
4715 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
4716 self.context.cur_counterparty_commitment_transaction_number -= 1;
4718 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
4719 self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
4722 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
4723 let mut to_forward_infos = Vec::new();
4724 let mut revoked_htlcs = Vec::new();
4725 let mut finalized_claimed_htlcs = Vec::new();
4726 let mut update_fail_htlcs = Vec::new();
4727 let mut update_fail_malformed_htlcs = Vec::new();
4728 let mut require_commitment = false;
4729 let mut value_to_self_msat_diff: i64 = 0;
4732 // Take references explicitly so that we can hold multiple references to self.context.
4733 let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
4734 let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
4735 let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;
4737 // We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
4738 pending_inbound_htlcs.retain(|htlc| {
4739 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
4740 log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
4741 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
4742 value_to_self_msat_diff += htlc.amount_msat as i64;
4744 *expecting_peer_commitment_signed = true;
4748 pending_outbound_htlcs.retain(|htlc| {
4749 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
4750 log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
4751 if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
4752 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
4754 finalized_claimed_htlcs.push(htlc.source.clone());
4755 // They fulfilled, so we sent them money
4756 value_to_self_msat_diff -= htlc.amount_msat as i64;
4761 for htlc in pending_inbound_htlcs.iter_mut() {
4762 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
4764 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
4768 let mut state = InboundHTLCState::Committed;
4769 mem::swap(&mut state, &mut htlc.state);
4771 if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
4772 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
4773 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
4774 require_commitment = true;
4775 } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
4776 match forward_info {
4777 PendingHTLCStatus::Fail(fail_msg) => {
4778 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
4779 require_commitment = true;
4781 HTLCFailureMsg::Relay(msg) => {
4782 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
4783 update_fail_htlcs.push(msg)
4785 HTLCFailureMsg::Malformed(msg) => {
4786 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
4787 update_fail_malformed_htlcs.push(msg)
4791 PendingHTLCStatus::Forward(forward_info) => {
4792 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
4793 to_forward_infos.push((forward_info, htlc.htlc_id));
4794 htlc.state = InboundHTLCState::Committed;
4800 for htlc in pending_outbound_htlcs.iter_mut() {
4801 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
4802 log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
4803 htlc.state = OutboundHTLCState::Committed;
4804 *expecting_peer_commitment_signed = true;
4806 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
4807 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
4808 // Grab the preimage, if it exists, instead of cloning
4809 let mut reason = OutboundHTLCOutcome::Success(None);
4810 mem::swap(outcome, &mut reason);
4811 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
4812 require_commitment = true;
4816 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
4818 if let Some((feerate, update_state)) = self.context.pending_update_fee {
4819 match update_state {
4820 FeeUpdateState::Outbound => {
4821 debug_assert!(self.context.is_outbound());
4822 log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
4823 self.context.feerate_per_kw = feerate;
4824 self.context.pending_update_fee = None;
4825 self.context.expecting_peer_commitment_signed = true;
4827 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
4828 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
4829 debug_assert!(!self.context.is_outbound());
4830 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
4831 require_commitment = true;
4832 self.context.feerate_per_kw = feerate;
4833 self.context.pending_update_fee = None;
4838 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
4839 let release_state_str =
4840 if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
4841 macro_rules! return_with_htlcs_to_fail {
4842 ($htlcs_to_fail: expr) => {
4843 if !release_monitor {
4844 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
4845 update: monitor_update,
4847 return Ok(($htlcs_to_fail, None));
4849 return Ok(($htlcs_to_fail, Some(monitor_update)));
4854 if self.context.channel_state.is_monitor_update_in_progress() {
4855 // We can't actually generate a new commitment transaction (incl by freeing holding
4856 // cells) while we can't update the monitor, so we just return what we have.
4857 if require_commitment {
4858 self.context.monitor_pending_commitment_signed = true;
4859 // When the monitor updating is restored we'll call
4860 // get_last_commitment_update_for_send(), which does not update state, but we're
4861 // definitely now awaiting a remote revoke before we can step forward any more, so it's safe to update our HTLC state here.
4863 let mut additional_update = self.build_commitment_no_status_check(logger);
4864 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
4865 // strictly increasing by one, so decrement it here.
4866 self.context.latest_monitor_update_id = monitor_update.update_id;
4867 monitor_update.updates.append(&mut additional_update.updates);
4869 self.context.monitor_pending_forwards.append(&mut to_forward_infos);
4870 self.context.monitor_pending_failures.append(&mut revoked_htlcs);
4871 self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
4872 log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
4873 return_with_htlcs_to_fail!(Vec::new());
4876 match self.free_holding_cell_htlcs(fee_estimator, logger) {
4877 (Some(mut additional_update), htlcs_to_fail) => {
4878 // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
4879 // strictly increasing by one, so decrement it here.
4880 self.context.latest_monitor_update_id = monitor_update.update_id;
4881 monitor_update.updates.append(&mut additional_update.updates);
4883 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
4884 &self.context.channel_id(), release_state_str);
4886 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
4887 return_with_htlcs_to_fail!(htlcs_to_fail);
4889 (None, htlcs_to_fail) => {
4890 if require_commitment {
4891 let mut additional_update = self.build_commitment_no_status_check(logger);
4893 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
4894 // strictly increasing by one, so decrement it here.
4895 self.context.latest_monitor_update_id = monitor_update.update_id;
4896 monitor_update.updates.append(&mut additional_update.updates);
4898 log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
4899 &self.context.channel_id(),
4900 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
4903 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
4904 return_with_htlcs_to_fail!(htlcs_to_fail);
4906 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
4907 &self.context.channel_id(), release_state_str);
4909 self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
4910 return_with_htlcs_to_fail!(htlcs_to_fail);
4916 /// Queues up an outbound update fee by placing it in the holding cell. You should call
4917 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
4918 /// commitment update.
4919 pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
4920 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4921 where F::Target: FeeEstimator, L::Target: Logger
4923 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
4924 assert!(msg_opt.is_none(), "We forced holding cell?");
4927 /// Adds a pending update to this channel. See the doc for send_htlc for
4928 /// further details on when the return value may be `None`.
4929 /// If our balance is too low to cover the cost of the next commitment transaction at the
4930 /// new feerate, the update is cancelled.
4932 /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
4933 /// [`Channel`] if `force_holding_cell` is false.
4934 fn send_update_fee<F: Deref, L: Deref>(
4935 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
4936 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
4937 ) -> Option<msgs::UpdateFee>
4938 where F::Target: FeeEstimator, L::Target: Logger
4940 if !self.context.is_outbound() {
4941 panic!("Cannot send fee from inbound channel");
4943 if !self.context.is_usable() {
4944 panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
4946 if !self.context.is_live() {
4947 panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
4950 // Before proposing a feerate update, check that we can actually afford the new fee.
4951 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
4952 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
4953 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
4954 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
4955 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
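// For intuition: the buffer covers the next commitment transaction's weight at the proposed
// feerate, counting current non-dust HTLCs, holding-cell HTLCs, and CONCURRENT_INBOUND_HTLC_FEE_BUFFER
// extra slots for HTLCs the peer may add concurrently. As a rough example, assuming the usual
// non-anchor commitment weights of 724 WU base plus 172 WU per HTLC, six counted HTLCs at
// 2_500 sat/kW cost 2_500 * (724 + 6 * 172) / 1_000 = 4_390 sats, i.e. a 4_390_000 msat buffer.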
4956 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
4957 if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
4958 //TODO: auto-close after a number of failures?
4959 log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
4963 // Note: we evaluate the pending HTLCs' "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
4964 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4965 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
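// "Dust exposure" here is, roughly, the total msat value of HTLCs too small to appear as
// outputs on the respective commitment transaction, and which would therefore be burned to
// fees if that transaction were ever broadcast.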
4966 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4967 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4968 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
4971 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4972 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
4976 if self.context.channel_state.is_awaiting_remote_revoke() || self.context.channel_state.is_monitor_update_in_progress() {
4977 force_holding_cell = true;
4980 if force_holding_cell {
4981 self.context.holding_cell_update_fee = Some(feerate_per_kw);
4985 debug_assert!(self.context.pending_update_fee.is_none());
4986 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
4988 Some(msgs::UpdateFee {
4989 channel_id: self.context.channel_id,
4994 /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
4995 /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be resent.
4997 /// No further message handling calls may be made until a channel_reestablish dance has completed.
4999 /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
5000 pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
5001 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
5002 if self.context.channel_state.is_pre_funded_state() {
5006 if self.context.channel_state.is_peer_disconnected() {
5007 // While the below code should be idempotent, it's simpler to just return early, as
5008 // redundant disconnect events can fire, though they should be rare.
5012 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
5013 self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
5016 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
5017 // will be retransmitted.
5018 self.context.last_sent_closing_fee = None;
5019 self.context.pending_counterparty_closing_signed = None;
5020 self.context.closing_fee_limits = None;
5022 let mut inbound_drop_count = 0;
5023 self.context.pending_inbound_htlcs.retain(|htlc| {
5025 InboundHTLCState::RemoteAnnounced(_) => {
5026 // They sent us an update_add_htlc but we never got the commitment_signed.
5027 // We'll tell them what commitment_signed we're expecting next and they'll drop
5028 // this HTLC accordingly
5029 inbound_drop_count += 1;
5032 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
5033 // We received a commitment_signed updating this HTLC and (at least hopefully)
5034 // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
5035 // in response to it yet, so don't touch it.
5038 InboundHTLCState::Committed => true,
5039 InboundHTLCState::LocalRemoved(_) => {
5040 // We (hopefully) sent a commitment_signed updating this HTLC (which we can
5041 // re-transmit if needed) and they may have even sent a revoke_and_ack back
5042 // (that we missed). Keep this around for now and if they tell us they missed
5043 // the commitment_signed we can re-transmit the update then.
5048 self.context.next_counterparty_htlc_id -= inbound_drop_count;
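// The dropped HTLCs were never committed, so when the peer re-sends them after reconnection
// they will reuse the same htlc_ids; rewinding our expected next ID keeps us in sync.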
5050 if let Some((_, update_state)) = self.context.pending_update_fee {
5051 if update_state == FeeUpdateState::RemoteAnnounced {
5052 debug_assert!(!self.context.is_outbound());
5053 self.context.pending_update_fee = None;
5057 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
5058 if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
5059 // They sent us an update to remove this but haven't yet sent the corresponding
5060 // commitment_signed, so we move it back to Committed and they can re-send
5061 // the update upon reconnection.
5062 htlc.state = OutboundHTLCState::Committed;
5066 self.context.sent_message_awaiting_response = None;
5068 self.context.channel_state.set_peer_disconnected();
5069 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
5073 /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
5074 /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
5075 /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
5076 /// update completes (potentially immediately).
5077 /// The messages which were generated with the monitor update must *not* have been sent to the
5078 /// remote end, and must instead have been dropped. They will be regenerated when
5079 /// [`Self::monitor_updating_restored`] is called.
5081 /// [`ChannelManager`]: super::channelmanager::ChannelManager
5082 /// [`chain::Watch`]: crate::chain::Watch
5083 /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
5084 fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
5085 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
5086 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
5087 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
5089 self.context.monitor_pending_revoke_and_ack |= resend_raa;
5090 self.context.monitor_pending_commitment_signed |= resend_commitment;
5091 self.context.monitor_pending_channel_ready |= resend_channel_ready;
5092 self.context.monitor_pending_forwards.append(&mut pending_forwards);
5093 self.context.monitor_pending_failures.append(&mut pending_fails);
5094 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
5095 self.context.channel_state.set_monitor_update_in_progress();
5098 /// Indicates that the latest ChannelMonitor update has been committed by the client
5099 /// successfully and we should restore normal operation. Returns messages which should be sent
5100 /// to the remote side.
5101 pub fn monitor_updating_restored<L: Deref, NS: Deref>(
5102 &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
5103 user_config: &UserConfig, best_block_height: u32
5104 ) -> MonitorRestoreUpdates
5107 NS::Target: NodeSigner
5109 assert!(self.context.channel_state.is_monitor_update_in_progress());
5110 self.context.channel_state.clear_monitor_update_in_progress();
5112 // If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to
5113 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
5114 // first received the funding_signed.
5115 let mut funding_broadcastable =
5116 if self.context.is_outbound() &&
5117 (matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
5118 matches!(self.context.channel_state, ChannelState::ChannelReady(_)))
5120 self.context.funding_transaction.take()
5122 // That said, if the funding transaction is already confirmed (ie we're active with a
5123 // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
5124 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.minimum_depth != Some(0) {
5125 funding_broadcastable = None;
5128 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
5129 // (and we assume the user never directly broadcasts the funding transaction and waits for
5130 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
5131 // * an inbound channel that failed to persist the monitor on funding_created and we got
5132 // the funding transaction confirmed before the monitor was persisted, or
5133 // * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
5134 let channel_ready = if self.context.monitor_pending_channel_ready {
5135 assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
5136 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
5137 self.context.monitor_pending_channel_ready = false;
5138 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
5139 Some(msgs::ChannelReady {
5140 channel_id: self.context.channel_id(),
5141 next_per_commitment_point,
5142 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5146 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
5148 let mut accepted_htlcs = Vec::new();
5149 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
5150 let mut failed_htlcs = Vec::new();
5151 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
5152 let mut finalized_claimed_htlcs = Vec::new();
5153 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
5155 if self.context.channel_state.is_peer_disconnected() {
5156 self.context.monitor_pending_revoke_and_ack = false;
5157 self.context.monitor_pending_commitment_signed = false;
5158 return MonitorRestoreUpdates {
5159 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
5160 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
5164 let raa = if self.context.monitor_pending_revoke_and_ack {
5165 Some(self.get_last_revoke_and_ack())
5167 let commitment_update = if self.context.monitor_pending_commitment_signed {
5168 self.get_last_commitment_update_for_send(logger).ok()
5170 if commitment_update.is_some() {
5171 self.mark_awaiting_response();
5174 self.context.monitor_pending_revoke_and_ack = false;
5175 self.context.monitor_pending_commitment_signed = false;
5176 let order = self.context.resend_order.clone();
5177 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
5178 &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
5179 if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
5180 match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
5181 MonitorRestoreUpdates {
5182 raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
5186 pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
5187 where F::Target: FeeEstimator, L::Target: Logger
5189 if self.context.is_outbound() {
5190 return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
5192 if self.context.channel_state.is_peer_disconnected() {
5193 return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
5195 Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
5197 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
5198 self.context.update_time_counter += 1;
5199 // Check that we won't be pushed over our dust exposure limit by the feerate increase.
5200 if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
5201 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
5202 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
5203 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
5204 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
5205 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
5206 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
5207 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
5208 msg.feerate_per_kw, holder_tx_dust_exposure)));
5210 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
5211 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
5212 msg.feerate_per_kw, counterparty_tx_dust_exposure)));
5218 /// Indicates that the signer may have some signatures for us, so we should retry if we're blocked.
5220 #[cfg(async_signing)]
5221 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
5222 let commitment_update = if self.context.signer_pending_commitment_update {
5223 self.get_last_commitment_update_for_send(logger).ok()
5225 let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
5226 self.context.get_funding_signed_msg(logger).1
5228 let channel_ready = if funding_signed.is_some() {
5229 self.check_get_channel_ready(0)
5232 log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
5233 if commitment_update.is_some() { "a" } else { "no" },
5234 if funding_signed.is_some() { "a" } else { "no" },
5235 if channel_ready.is_some() { "a" } else { "no" });
5237 SignerResumeUpdates {
5244 fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
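// Holder commitment numbers count down from INITIAL_COMMITMENT_NUMBER. Roughly:
// `cur_holder_commitment_transaction_number` is the next commitment we expect to be signed,
// so `+ 2` selects the prior, now-revoked state whose secret we release here, while the point
// at the current number commits us to the next state.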
5245 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
5246 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
5247 msgs::RevokeAndACK {
5248 channel_id: self.context.channel_id,
5249 per_commitment_secret,
5250 next_per_commitment_point,
5252 next_local_nonce: None,
5256 /// Gets the last commitment update for immediate sending to our peer.
5257 fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
5258 let mut update_add_htlcs = Vec::new();
5259 let mut update_fulfill_htlcs = Vec::new();
5260 let mut update_fail_htlcs = Vec::new();
5261 let mut update_fail_malformed_htlcs = Vec::new();
5263 for htlc in self.context.pending_outbound_htlcs.iter() {
5264 if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
5265 update_add_htlcs.push(msgs::UpdateAddHTLC {
5266 channel_id: self.context.channel_id(),
5267 htlc_id: htlc.htlc_id,
5268 amount_msat: htlc.amount_msat,
5269 payment_hash: htlc.payment_hash,
5270 cltv_expiry: htlc.cltv_expiry,
5271 onion_routing_packet: (**onion_packet).clone(),
5272 skimmed_fee_msat: htlc.skimmed_fee_msat,
5273 blinding_point: htlc.blinding_point,
5278 for htlc in self.context.pending_inbound_htlcs.iter() {
5279 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
5281 &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
5282 update_fail_htlcs.push(msgs::UpdateFailHTLC {
5283 channel_id: self.context.channel_id(),
5284 htlc_id: htlc.htlc_id,
5285 reason: err_packet.clone()
5288 &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
5289 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
5290 channel_id: self.context.channel_id(),
5291 htlc_id: htlc.htlc_id,
5292 sha256_of_onion: sha256_of_onion.clone(),
5293 failure_code: failure_code.clone(),
5296 &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
5297 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
5298 channel_id: self.context.channel_id(),
5299 htlc_id: htlc.htlc_id,
5300 payment_preimage: payment_preimage.clone(),
5307 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
5308 Some(msgs::UpdateFee {
5309 channel_id: self.context.channel_id(),
5310 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
5314 log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
5315 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
5316 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
5317 let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
5318 if self.context.signer_pending_commitment_update {
5319 log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
5320 self.context.signer_pending_commitment_update = false;
5324 #[cfg(not(async_signing))] {
5325 panic!("Failed to get signature for new commitment state");
5327 #[cfg(async_signing)] {
5328 if !self.context.signer_pending_commitment_update {
5329 log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
5330 self.context.signer_pending_commitment_update = true;
5335 Ok(msgs::CommitmentUpdate {
5336 update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
5341 /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
5342 pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
5343 if self.context.channel_state.is_local_shutdown_sent() {
5344 assert!(self.context.shutdown_scriptpubkey.is_some());
5345 Some(msgs::Shutdown {
5346 channel_id: self.context.channel_id,
5347 scriptpubkey: self.get_closing_scriptpubkey(),
5352 /// May panic if some calls other than message-handling calls (which will all Err immediately)
5353 /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
5355 /// Some links printed in log lines are included here to check them during build (when run with
5356 /// `cargo doc --document-private-items`):
5357 /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
5358 /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
5359 pub fn channel_reestablish<L: Deref, NS: Deref>(
5360 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
5361 chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
5362 ) -> Result<ReestablishResponses, ChannelError>
5365 NS::Target: NodeSigner
5367 if !self.context.channel_state.is_peer_disconnected() {
5368 // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
5369 // almost certainly indicates we are going to end up out-of-sync in some way, so we
5370 // just close here instead of trying to recover.
5371 return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
5374 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
5375 msg.next_local_commitment_number == 0 {
5376 return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
5379 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
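// Internally commitment numbers count down from INITIAL_COMMITMENT_NUMBER, while the
// channel_reestablish fields count commitments upward from the start of the channel, hence
// the conversion here.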
5380 if msg.next_remote_commitment_number > 0 {
5381 let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
5382 let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
5383 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
5384 if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
5385 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
5387 if msg.next_remote_commitment_number > our_commitment_transaction {
5388 macro_rules! log_and_panic {
5389 ($err_msg: expr) => {
5390 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
5391 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
5394 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
5395 This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
5396 More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
5397 If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
5398 ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
5399 ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
5400 Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
5401 See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
5405 // Before we change the state of the channel, we check if the peer is sending a very old
5406 // commitment transaction number; if so, we send a warning message.
5407 if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
5408 return Err(ChannelError::Warn(format!(
5409 "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
5410 msg.next_remote_commitment_number,
5411 our_commitment_transaction
5415 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
5416 // remaining cases either succeed or ErrorMessage-fail).
5417 self.context.channel_state.clear_peer_disconnected();
5418 self.context.sent_message_awaiting_response = None;
5420 let shutdown_msg = self.get_outbound_shutdown();
5422 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
5424 if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
5425 // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
5426 if !self.context.channel_state.is_our_channel_ready() ||
5427 self.context.channel_state.is_monitor_update_in_progress() {
5428 if msg.next_remote_commitment_number != 0 {
5429 return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
5431 // Short circuit the whole handler as there is nothing we can resend them
5432 return Ok(ReestablishResponses {
5433 channel_ready: None,
5434 raa: None, commitment_update: None,
5435 order: RAACommitmentOrder::CommitmentFirst,
5436 shutdown_msg, announcement_sigs,
5440 // We have OurChannelReady set!
5441 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
5442 return Ok(ReestablishResponses {
5443 channel_ready: Some(msgs::ChannelReady {
5444 channel_id: self.context.channel_id(),
5445 next_per_commitment_point,
5446 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5448 raa: None, commitment_update: None,
5449 order: RAACommitmentOrder::CommitmentFirst,
5450 shutdown_msg, announcement_sigs,
5454 let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
5455 // Remote isn't waiting on any RevokeAndACK from us!
5456 // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
5458 } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
5459 if self.context.channel_state.is_monitor_update_in_progress() {
5460 self.context.monitor_pending_revoke_and_ack = true;
5463 Some(self.get_last_revoke_and_ack())
5466 debug_assert!(false, "All values should have been handled in the four cases above");
5467 return Err(ChannelError::Close(format!(
5468 "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
5469 msg.next_remote_commitment_number,
5470 our_commitment_transaction
5474 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
5475 // revoke_and_ack, not on sending commitment_signed, so we add one if we have
5476 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
5477 // the corresponding revoke_and_ack back yet.
5478 let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke();
5479 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
5480 self.mark_awaiting_response();
5482 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
5484 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
5485 // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
5486 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
5487 Some(msgs::ChannelReady {
5488 channel_id: self.context.channel_id(),
5489 next_per_commitment_point,
5490 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5494 if msg.next_local_commitment_number == next_counterparty_commitment_number {
5495 if required_revoke.is_some() {
5496 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
5498 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
5501 Ok(ReestablishResponses {
5502 channel_ready, shutdown_msg, announcement_sigs,
5503 raa: required_revoke,
5504 commitment_update: None,
5505 order: self.context.resend_order.clone(),
5507 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
5508 if required_revoke.is_some() {
5509 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
5511 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
5514 if self.context.channel_state.is_monitor_update_in_progress() {
5515 self.context.monitor_pending_commitment_signed = true;
5516 Ok(ReestablishResponses {
5517 channel_ready, shutdown_msg, announcement_sigs,
5518 commitment_update: None, raa: None,
5519 order: self.context.resend_order.clone(),
5522 Ok(ReestablishResponses {
5523 channel_ready, shutdown_msg, announcement_sigs,
5524 raa: required_revoke,
5525 commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
5526 order: self.context.resend_order.clone(),
5529 } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
5530 Err(ChannelError::Close(format!(
5531 "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
5532 msg.next_local_commitment_number,
5533 next_counterparty_commitment_number,
5536 Err(ChannelError::Close(format!(
5537 "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
5538 msg.next_local_commitment_number,
5539 next_counterparty_commitment_number,
5544 /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
5545 /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
5546 /// at which point they will be recalculated.
5547 fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
5549 where F::Target: FeeEstimator
5551 if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
5553 // Propose a range from our current Background feerate to our Normal feerate plus our
5554 // force_close_avoidance_max_fee_satoshis.
5555 // If we fail to come to consensus, we'll have to force-close.
5556 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
5557 // Use NonAnchorChannelFee because this should be an estimate for a channel close
5558 // that we don't expect to need fee bumping
5559 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
5560 let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
5562 // The spec requires that (when the channel does not have anchors) we only send absolute
5563 // channel fees no greater than the absolute channel fee on the current commitment
5564 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
5565 // a very good reason to apply such a limit in any case. We don't bother doing so, risking
5566 // some force-closure by old nodes, but we wanted to close the channel anyway.
5568 if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
5569 let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
5570 proposed_feerate = cmp::max(proposed_feerate, min_feerate);
5571 proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
5574 // Note that technically we could end up with a lower minimum fee if one side's balance is
5575 // below our dust limit, causing the output to disappear. We don't bother handling this
5576 // case, however, as this should only happen if a channel is closed before any (material)
5577 // payments have been made on it. This may cause slight fee overpayment and/or failure to
5578 // come to consensus with our counterparty on appropriate fees, however it should be a
5579 // relatively rare case. We can revisit this later, though note that in order to determine
5580 // if the funder's output is dust we have to know the absolute fee we're going to use.
5581 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
5582 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
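// Feerates are in sats per 1000 weight units: e.g. a 700 WU closing transaction at
// 1_000 sat/kW pays 1_000 * 700 / 1_000 = 700 sats.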
5583 let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
5584 // We always add force_close_avoidance_max_fee_satoshis to our normal
5585 // feerate-calculated fee, but allow the max to be overridden if we're using a
5586 // target feerate-calculated fee.
5587 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
5588 proposed_max_feerate as u64 * tx_weight / 1000)
5590 self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
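// As the non-funder, the closing fee comes entirely out of the funder's balance, so the only
// cap we apply is their full balance: the channel value less our balance, rounded up to sats.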
5593 self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
5594 self.context.closing_fee_limits.clone().unwrap()
5597 /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
5598 /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
5599 /// this point if we're the funder we should send the initial closing_signed, and in any case
5600 /// shutdown should complete within a reasonable timeframe.
5601 fn closing_negotiation_ready(&self) -> bool {
5602 self.context.closing_negotiation_ready()
5605 /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
5606 /// an Err if no progress is being made and the channel should be force-closed instead.
5607 /// Should be called on a one-minute timer.
5608 pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
5609 if self.closing_negotiation_ready() {
5610 if self.context.closing_signed_in_flight {
5611 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
5613 self.context.closing_signed_in_flight = true;
5619 pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
5620 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
5621 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
5622 where F::Target: FeeEstimator, L::Target: Logger
5624 // If we're waiting on a monitor persistence, that implies we're also waiting to send some
5625 // message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
5626 // initiate `closing_signed` negotiation until we're clear of all pending messages. Note
5627 // that closing_negotiation_ready checks this case (as well as a few others).
5628 if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
5629 return Ok((None, None, None));
5632 if !self.context.is_outbound() {
5633 if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
5634 return self.closing_signed(fee_estimator, &msg);
5636 return Ok((None, None, None));
5639 // If we're waiting on a counterparty `commitment_signed` to clear some updates from our
5640 // local commitment transaction, we can't yet initiate `closing_signed` negotiation.
5641 if self.context.expecting_peer_commitment_signed {
5642 return Ok((None, None, None));
5645 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
5647 assert!(self.context.shutdown_scriptpubkey.is_some());
5648 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
5649 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
5650 our_min_fee, our_max_fee, total_fee_satoshis);
5652 match &self.context.holder_signer {
5653 ChannelSignerType::Ecdsa(ecdsa) => {
5655 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
5656 .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
5658 self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
5659 Ok((Some(msgs::ClosingSigned {
5660 channel_id: self.context.channel_id,
5661 fee_satoshis: total_fee_satoshis,
5663 fee_range: Some(msgs::ClosingSignedFeeRange {
5664 min_fee_satoshis: our_min_fee,
5665 max_fee_satoshis: our_max_fee,
5669 // TODO (taproot|arik)
5675 // Marks a channel as waiting for a response from the counterparty. If it's not received
5676 // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
5678 fn mark_awaiting_response(&mut self) {
5679 self.context.sent_message_awaiting_response = Some(0);
5682 /// Determines whether we should disconnect the counterparty due to not receiving a response
5683 /// within our expected timeframe.
5685 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
5686 pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
5687 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
5690 // Don't disconnect when we're not waiting on a response.
5693 *ticks_elapsed += 1;
5694 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
5698 &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
5699 ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
5701 if self.context.channel_state.is_peer_disconnected() {
5702 return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
5704 if self.context.channel_state.is_pre_funded_state() {
5705 // Spec says we should fail the connection, not the channel, but that's nonsense, there
5706 // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
5707 // can do that via error message without getting a connection fail anyway...
5708 return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
5710 for htlc in self.context.pending_inbound_htlcs.iter() {
5711 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
5712 return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
5715 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
5717 if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
5718 return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
5721 if self.context.counterparty_shutdown_scriptpubkey.is_some() {
5722 if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
5723 return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
5726 self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
5729 // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
5730 // immediately after the commitment dance, but we can send a Shutdown because we won't send
5731 // any further commitment updates after we set LocalShutdownSent.
5732 let send_shutdown = !self.context.channel_state.is_local_shutdown_sent();
5734 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
5737 assert!(send_shutdown);
5738 let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
5739 Ok(scriptpubkey) => scriptpubkey,
5740 Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
5742 if !shutdown_scriptpubkey.is_compatible(their_features) {
5743 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
5745 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
5750 // From here on out, we may not fail!
5752 self.context.channel_state.set_remote_shutdown_sent();
5753 self.context.update_time_counter += 1;
5755 let monitor_update = if update_shutdown_script {
5756 self.context.latest_monitor_update_id += 1;
5757 let monitor_update = ChannelMonitorUpdate {
5758 update_id: self.context.latest_monitor_update_id,
5759 counterparty_node_id: Some(self.context.counterparty_node_id),
5760 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
5761 scriptpubkey: self.get_closing_scriptpubkey(),
5763 channel_id: Some(self.context.channel_id()),
5765 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
5766 self.push_ret_blockable_mon_update(monitor_update)
5768 let shutdown = if send_shutdown {
5769 Some(msgs::Shutdown {
5770 channel_id: self.context.channel_id,
5771 scriptpubkey: self.get_closing_scriptpubkey(),
5775 // We can't send our shutdown until we've committed all of our pending HTLCs, but the
5776 // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
5777 // cell HTLCs and return them to fail the payment.
5778 self.context.holding_cell_update_fee = None;
5779 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
5780 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5782 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
5783 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
5790 self.context.channel_state.set_local_shutdown_sent();
5791 self.context.update_time_counter += 1;
5793 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
5796 fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
5797 let mut tx = closing_tx.trust().built_transaction().clone();
5799 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
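// (OP_CHECKMULTISIG pops one extra element off the stack, hence the leading empty push.)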
5801 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
5802 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
5803 let mut holder_sig = sig.serialize_der().to_vec();
5804 holder_sig.push(EcdsaSighashType::All as u8);
5805 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
5806 cp_sig.push(EcdsaSighashType::All as u8);
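// BOLT 3 orders the funding pubkeys lexicographically in the 2-of-2 redeemscript, and
// OP_CHECKMULTISIG requires signatures in the same order as their pubkeys, hence the key
// comparison below.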
5807 if funding_key[..] < counterparty_funding_key[..] {
5808 tx.input[0].witness.push(holder_sig);
5809 tx.input[0].witness.push(cp_sig);
5811 tx.input[0].witness.push(cp_sig);
5812 tx.input[0].witness.push(holder_sig);
5815 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
5819 pub fn closing_signed<F: Deref>(
5820 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
5821 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
5822 where F::Target: FeeEstimator
5824 if !self.context.channel_state.is_both_sides_shutdown() {
5825 return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
5827 if self.context.channel_state.is_peer_disconnected() {
5828 return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
5830 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
5831 return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
5833 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
5834 return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
5837 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
5838 return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
5841 if self.context.channel_state.is_monitor_update_in_progress() {
5842 self.context.pending_counterparty_closing_signed = Some(msg.clone());
5843 return Ok((None, None, None));
5846 let funding_redeemscript = self.context.get_funding_redeemscript();
5847 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
5848 if used_total_fee != msg.fee_satoshis {
5849 return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
5851 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
5853 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
5856 // The remote end may have decided to revoke their output due to inconsistent dust
5857 // limits, so check for that case by re-checking the signature here.
5858 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
5859 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
5860 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
5864 for outp in closing_tx.trust().built_transaction().output.iter() {
5865 if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
5866 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
5870 let closure_reason = if self.initiated_shutdown() {
5871 ClosureReason::LocallyInitiatedCooperativeClosure
5873 ClosureReason::CounterpartyInitiatedCooperativeClosure
5876 assert!(self.context.shutdown_scriptpubkey.is_some());
5877 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
5878 if last_fee == msg.fee_satoshis {
5879 let shutdown_result = ShutdownResult {
5881 monitor_update: None,
5882 dropped_outbound_htlcs: Vec::new(),
5883 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
5884 channel_id: self.context.channel_id,
5885 user_channel_id: self.context.user_id,
5886 channel_capacity_satoshis: self.context.channel_value_satoshis,
5887 counterparty_node_id: self.context.counterparty_node_id,
5888 unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
5889 channel_funding_txo: self.context.get_funding_txo(),
5891 let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
5892 self.context.channel_state = ChannelState::ShutdownComplete;
5893 self.context.update_time_counter += 1;
5894 return Ok((None, Some(tx), Some(shutdown_result)));
5898 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
5900 macro_rules! propose_fee {
5901 ($new_fee: expr) => {
5902 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
5903 (closing_tx, $new_fee)
5905 self.build_closing_transaction($new_fee, false)
5908 return match &self.context.holder_signer {
5909 ChannelSignerType::Ecdsa(ecdsa) => {
5911 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
5912 .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
5913 let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
5914 let shutdown_result = ShutdownResult {
5916 monitor_update: None,
5917 dropped_outbound_htlcs: Vec::new(),
5918 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
5919 channel_id: self.context.channel_id,
5920 user_channel_id: self.context.user_id,
5921 channel_capacity_satoshis: self.context.channel_value_satoshis,
5922 counterparty_node_id: self.context.counterparty_node_id,
5923 unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
5924 channel_funding_txo: self.context.get_funding_txo(),
5926 self.context.channel_state = ChannelState::ShutdownComplete;
5927 self.context.update_time_counter += 1;
5928 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
5929 (Some(tx), Some(shutdown_result))
5934 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
5935 Ok((Some(msgs::ClosingSigned {
5936 channel_id: self.context.channel_id,
5937 fee_satoshis: used_fee,
5939 fee_range: Some(msgs::ClosingSignedFeeRange {
5940 min_fee_satoshis: our_min_fee,
5941 max_fee_satoshis: our_max_fee,
5943 }), signed_tx, shutdown_result))
5945 // TODO (taproot|arik)
5952 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
5953 if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
5954 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
5956 if max_fee_satoshis < our_min_fee {
5957 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
5959 if min_fee_satoshis > our_max_fee {
5960 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
5963 if !self.context.is_outbound() {
5964 // They have to pay, so pick the highest fee in the overlapping range.
5965 // We should never set an upper bound aside from their full balance
5966 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
5967 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
5969 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
5970 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
5971 msg.fee_satoshis, our_min_fee, our_max_fee)));
5973 // The proposed fee is in our acceptable range, accept it and broadcast!
5974 propose_fee!(msg.fee_satoshis);
5977 // Old fee style negotiation. We don't bother to enforce whether they are complying
5978 // with the "making progress" requirements; we just comply and hope for the best.
5979 if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
5980 if msg.fee_satoshis > last_fee {
5981 if msg.fee_satoshis < our_max_fee {
5982 propose_fee!(msg.fee_satoshis);
5983 } else if last_fee < our_max_fee {
5984 propose_fee!(our_max_fee);
5986 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
5989 if msg.fee_satoshis > our_min_fee {
5990 propose_fee!(msg.fee_satoshis);
5991 } else if last_fee > our_min_fee {
5992 propose_fee!(our_min_fee);
5994 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
5998 if msg.fee_satoshis < our_min_fee {
5999 propose_fee!(our_min_fee);
6000 } else if msg.fee_satoshis > our_max_fee {
6001 propose_fee!(our_max_fee);
6003 propose_fee!(msg.fee_satoshis);
6009 fn internal_htlc_satisfies_config(
6010 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
6011 ) -> Result<(), (&'static str, u16)> {
6012 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
6013 .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
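// e.g. to forward 100_000 msat with forwarding_fee_base_msat = 1_000 and
// forwarding_fee_proportional_millionths = 100, the fee is 100_000 * 100 / 1_000_000 + 1_000
// = 1_010 msat, so the inbound HTLC must carry at least 101_010 msat.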
6014 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
6015 (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
6017 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
6018 0x1000 | 12, // fee_insufficient
6021 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
6023 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
6024 0x1000 | 13, // incorrect_cltv_expiry
6030 /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
6031 /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
6032 /// unsuccessful, falls back to the previous one if one exists.
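///
/// A purely illustrative example of the fee/CLTV checks performed here: with
/// `forwarding_fee_base_msat = 1_000`, `forwarding_fee_proportional_millionths = 100`
/// and `amt_to_forward = 1_000_000` msat, the required forwarding fee is
/// `1_000 + 1_000_000 * 100 / 1_000_000 = 1_100` msat, so the incoming HTLC must carry
/// at least `1_001_100` msat and a `cltv_expiry` at least `cltv_expiry_delta` blocks
/// above `outgoing_cltv_value`.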
6033 pub fn htlc_satisfies_config(
6034 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
6035 ) -> Result<(), (&'static str, u16)> {
6036 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
6038 if let Some(prev_config) = self.context.prev_config() {
6039 self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
6046 pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
6047 self.context.cur_holder_commitment_transaction_number + 1
6050 pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
6051 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 }
6054 pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
6055 self.context.cur_counterparty_commitment_transaction_number + 2
6059 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
6060 &self.context.holder_signer
6064 pub fn get_value_stat(&self) -> ChannelValueStat {
6066 value_to_self_msat: self.context.value_to_self_msat,
6067 channel_value_msat: self.context.channel_value_satoshis * 1000,
6068 channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
6069 pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
6070 pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
6071 holding_cell_outbound_amount_msat: {
6073 for h in self.context.holding_cell_htlc_updates.iter() {
6075 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
6083 counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
6084 counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
6088 /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
6089 /// Allowed in any state (including after shutdown)
6090 pub fn is_awaiting_monitor_update(&self) -> bool {
6091 self.context.channel_state.is_monitor_update_in_progress()
6094 /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
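///
/// For example (illustrative numbers only): if the oldest blocked update in the queue has
/// `update_id` 5, this returns 4, i.e. the last update ID that was actually released.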
6095 pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
6096 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
6097 self.context.blocked_monitor_updates[0].update.update_id - 1
6100 /// Returns the next blocked monitor update, if one exists, and a bool which indicates a
6101 /// further blocked monitor update exists after the next.
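///
/// For example (illustrative): with three queued blocked updates this pops the first and
/// returns `Some((update, true))`; with exactly one queued it returns `Some((update, false))`.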
6102 pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
6103 if self.context.blocked_monitor_updates.is_empty() { return None; }
6104 Some((self.context.blocked_monitor_updates.remove(0).update,
6105 !self.context.blocked_monitor_updates.is_empty()))
6108 /// Pushes a new monitor update into our monitor update queue, returning it if it should be
6109 /// immediately given to the user for persisting or `None` if it should be held as blocked.
6110 fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
6111 -> Option<ChannelMonitorUpdate> {
6112 let release_monitor = self.context.blocked_monitor_updates.is_empty();
6113 if !release_monitor {
6114 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
6123 pub fn blocked_monitor_updates_pending(&self) -> usize {
6124 self.context.blocked_monitor_updates.len()
6127 /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
6128 /// If the channel is outbound, this implies we have not yet broadcasted the funding
6129 /// transaction. If the channel is inbound, this implies simply that the channel has not
6131 pub fn is_awaiting_initial_mon_persist(&self) -> bool {
6132 if !self.is_awaiting_monitor_update() { return false; }
6134 self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
6135 if flags.clone().clear(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty()
6137 // If we're not a 0conf channel, we'll be waiting on a monitor update with only
6138 // AwaitingChannelReady set, though our peer could have sent their channel_ready.
6139 debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
6142 if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
6143 self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
6144 // If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
6145 // waiting for the initial monitor persistence. Thus, we check if our commitment
6146 // transaction numbers have both been iterated only exactly once (for the
6147 // funding_signed), and we're awaiting monitor update.
6149 // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
6150 // only way to get an awaiting-monitor-update state during initial funding is if the
6151 // initial monitor persistence is still pending).
6153 // Because deciding we're awaiting initial broadcast spuriously could result in
6154 // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
6155 // we hard-assert here, even in production builds.
6156 if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
6157 assert!(self.context.monitor_pending_channel_ready);
6158 assert_eq!(self.context.latest_monitor_update_id, 0);
6164 /// Returns true if our channel_ready has been sent
6165 pub fn is_our_channel_ready(&self) -> bool {
6166 matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) ||
6167 matches!(self.context.channel_state, ChannelState::ChannelReady(_))
6170 /// Returns true if our peer has either initiated or agreed to shut down the channel.
6171 pub fn received_shutdown(&self) -> bool {
6172 self.context.channel_state.is_remote_shutdown_sent()
6175 /// Returns true if we either initiated or agreed to shut down the channel.
6176 pub fn sent_shutdown(&self) -> bool {
6177 self.context.channel_state.is_local_shutdown_sent()
6180 /// Returns true if we initiated shutting down the channel.
6181 pub fn initiated_shutdown(&self) -> bool {
6182 self.context.local_initiated_shutdown.is_some()
6185 /// Returns true if this channel is fully shut down. True here implies that no further actions
6186 /// may/will be taken on this channel, and thus this object should be freed. Any future changes
6187 /// will be handled appropriately by the chain monitor.
6188 pub fn is_shutdown(&self) -> bool {
6189 matches!(self.context.channel_state, ChannelState::ShutdownComplete)
6192 pub fn channel_update_status(&self) -> ChannelUpdateStatus {
6193 self.context.channel_update_status
6196 pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
6197 self.context.update_time_counter += 1;
6198 self.context.channel_update_status = status;
6201 fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
6203 // * always when a new block/transactions are confirmed with the new height
6204 // * when funding is signed with a height of 0
6205 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
6209 let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
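// Illustrative example: a funding tx first confirmed at height 100, with the current
// best height at 105, has 105 - 100 + 1 = 6 confirmations here.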
6210 if funding_tx_confirmations <= 0 {
6211 self.context.funding_tx_confirmation_height = 0;
6214 if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
6218 // If we're still pending the signature on a funding transaction, then we're not ready to send a
6219 // channel_ready yet.
6220 if self.context.signer_pending_funding {
6224 // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
6225 // channel_ready until the entire batch is ready.
6226 let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()).is_empty()) {
6227 self.context.channel_state.set_our_channel_ready();
6229 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
6230 self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
6231 self.context.update_time_counter += 1;
6233 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
6234 // We got a reorg but not enough to trigger a force close, just ignore.
6237 if self.context.funding_tx_confirmation_height != 0 &&
6238 self.context.channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new())
6240 // We should never see a funding transaction on-chain until we've received
6241 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
6242 // an inbound channel - before that we have no known funding TXID). The fuzzer,
6243 // however, may do this and we shouldn't treat it as a bug.
6244 #[cfg(not(fuzzing))]
6245 panic!("Started confirming a channel in a state pre-AwaitingChannelReady: {}.\n\
6246 Do NOT broadcast a funding transaction manually - let LDK do it for you!",
6247 self.context.channel_state.to_u32());
6249 // We got a reorg but not enough to trigger a force close, just ignore.
6253 if need_commitment_update {
6254 if !self.context.channel_state.is_monitor_update_in_progress() {
6255 if !self.context.channel_state.is_peer_disconnected() {
6256 let next_per_commitment_point =
6257 self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
6258 return Some(msgs::ChannelReady {
6259 channel_id: self.context.channel_id,
6260 next_per_commitment_point,
6261 short_channel_id_alias: Some(self.context.outbound_scid_alias),
6265 self.context.monitor_pending_channel_ready = true;
6271 /// When a transaction is confirmed, we check whether it is or spends the funding transaction.
6272 /// In the first case, we store the confirmation height and calculate the short channel id.
6273 /// In the second, we simply return an Err indicating we need to be force-closed now.
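///
/// A rough caller-side sketch (helper names here are assumptions, not a verbatim API):
/// ```ignore
/// match channel.transactions_confirmed(&block_hash, height, &txdata, chain_hash,
///         &node_signer, &user_config, &logger) {
///     Ok((channel_ready, announcement_sigs)) => {
///         // Forward any resulting messages to the counterparty.
///     },
///     Err(closure_reason) => {
///         // The channel can no longer continue and must be force-closed.
///     },
/// }
/// ```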
6274 pub fn transactions_confirmed<NS: Deref, L: Deref>(
6275 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
6276 chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
6277 ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
6279 NS::Target: NodeSigner,
6282 let mut msgs = (None, None);
6283 if let Some(funding_txo) = self.context.get_funding_txo() {
6284 for &(index_in_block, tx) in txdata.iter() {
6285 // Check if the transaction is the expected funding transaction, and if it is,
6286 // check that it pays the right amount to the right script.
6287 if self.context.funding_tx_confirmation_height == 0 {
6288 if tx.txid() == funding_txo.txid {
6289 let txo_idx = funding_txo.index as usize;
6290 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
6291 tx.output[txo_idx].value != self.context.channel_value_satoshis {
6292 if self.context.is_outbound() {
6293 // If we generated the funding transaction and it doesn't match what it
6294 // should, the client is really broken and we should just panic and
6295 // tell them off. That said, because hash collisions happen with high
6296 // probability in fuzzing mode, if we're fuzzing we just close the
6297 // channel and move on.
6298 #[cfg(not(fuzzing))]
6299 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
6301 self.context.update_time_counter += 1;
6302 let err_reason = "funding tx had wrong script/value or output index";
6303 return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
6305 if self.context.is_outbound() {
6306 if !tx.is_coin_base() {
6307 for input in tx.input.iter() {
6308 if input.witness.is_empty() {
6309 // We generated a malleable funding transaction, implying we've
6310 // just exposed ourselves to funds loss to our counterparty.
6311 #[cfg(not(fuzzing))]
6312 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
6317 self.context.funding_tx_confirmation_height = height;
6318 self.context.funding_tx_confirmed_in = Some(*block_hash);
6319 self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
6320 Ok(scid) => Some(scid),
6321 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
6324 // If this is a coinbase transaction and not a 0-conf channel
6325 // we should update our min_depth to 100 to handle coinbase maturity
6326 if tx.is_coin_base() &&
6327 self.context.minimum_depth.unwrap_or(0) > 0 &&
6328 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
6329 self.context.minimum_depth = Some(COINBASE_MATURITY);
6332 // If we allow 1-conf funding, we may need to check for channel_ready here and
6333 // send it immediately instead of waiting for a best_block_updated call (which
6334 // may have already happened for this block).
6335 if let Some(channel_ready) = self.check_get_channel_ready(height) {
6336 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
6337 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
6338 msgs = (Some(channel_ready), announcement_sigs);
6341 for inp in tx.input.iter() {
6342 if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
6343 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
6344 return Err(ClosureReason::CommitmentTxConfirmed);
6352 /// When a new block is connected, we check the height of the block against outbound holding
6353 /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
6354 /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
6355 /// handled by the ChannelMonitor.
6357 /// If we return Err, the channel may have been closed, at which point the standard
6358 /// requirements apply - no calls may be made except those explicitly stated to be allowed
6361 /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
6363 pub fn best_block_updated<NS: Deref, L: Deref>(
6364 &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
6365 node_signer: &NS, user_config: &UserConfig, logger: &L
6366 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
6368 NS::Target: NodeSigner,
6371 self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
6374 fn do_best_block_updated<NS: Deref, L: Deref>(
6375 &mut self, height: u32, highest_header_time: u32,
6376 chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
6377 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
6379 NS::Target: NodeSigner,
6382 let mut timed_out_htlcs = Vec::new();
6383 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
6384 // forward an HTLC when our counterparty should almost certainly just fail it for expiring
6386 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
6387 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
6389 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
6390 if *cltv_expiry <= unforwarded_htlc_cltv_limit {
6391 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
6399 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
6401 if let Some(channel_ready) = self.check_get_channel_ready(height) {
6402 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
6403 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
6405 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
6406 return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
6409 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
6410 self.context.channel_state.is_our_channel_ready() {
6411 let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
6412 if self.context.funding_tx_confirmation_height == 0 {
6413 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
6414 // zero if it has been reorged out, however in either case, our state flags
6415 // indicate we've already sent a channel_ready
6416 funding_tx_confirmations = 0;
6419 // If we've sent channel_ready (or have both sent and received channel_ready), and
6420 // the funding transaction has become unconfirmed,
6421 // close the channel and hope we can get the latest state on chain (because presumably
6422 // the funding transaction is at least still in the mempool of most nodes).
6424 // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
6425 // 0-conf channel, but not doing so may lead to the
6426 // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
6428 if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
6429 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
6430 self.context.minimum_depth.unwrap(), funding_tx_confirmations);
6431 return Err(ClosureReason::ProcessingError { err: err_reason });
6433 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
6434 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
6435 log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
6436 // If funding_tx_confirmed_in is unset, the channel must not be active
6437 assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new()));
6438 assert!(!self.context.channel_state.is_our_channel_ready());
6439 return Err(ClosureReason::FundingTimedOut);
6442 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
6443 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
6445 Ok((None, timed_out_htlcs, announcement_sigs))
6448 /// Indicates the funding transaction is no longer confirmed in the main chain. This may
6449 /// force-close the channel, but may also indicate a harmless reorganization of a block or two
6450 /// before the channel has reached channel_ready and we can just wait for more blocks.
6451 pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
6452 if self.context.funding_tx_confirmation_height != 0 {
6453 // We handle the funding disconnection by calling best_block_updated with a height one
6454 // below where our funding was connected, implying a reorg back to conf_height - 1.
6455 let reorg_height = self.context.funding_tx_confirmation_height - 1;
6456 // We use the time field to bump the current time we set on channel updates if it's
6457 // larger. If we don't know that time has moved forward, we can just set it to the last
6458 // time we saw and it will be ignored.
6459 let best_time = self.context.update_time_counter;
6460 match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) {
6461 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
6462 assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
6463 assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
6464 assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
6470 // We never learned about the funding confirmation anyway, just ignore
6475 // Methods to get unprompted messages to send to the remote end (or where we already returned
6476 // something in the handler for the message that prompted this message):
6478 /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
6479 /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
6480 /// directions). Should be used for both broadcasted announcements and in response to an
6481 /// AnnouncementSignatures message from the remote peer.
6483 /// Will only fail if we're not in a state where channel_announcement may be sent (including
6486 /// This will only return ChannelError::Ignore upon failure.
6488 /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
6489 fn get_channel_announcement<NS: Deref>(
6490 &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
6491 ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
6492 if !self.context.config.announced_channel {
6493 return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
6495 if !self.context.is_usable() {
6496 return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
6499 let short_channel_id = self.context.get_short_channel_id()
6500 .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
6501 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
6502 .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
6503 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
6504 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
6506 let msg = msgs::UnsignedChannelAnnouncement {
6507 features: channelmanager::provided_channel_features(&user_config),
6510 node_id_1: if were_node_one { node_id } else { counterparty_node_id },
6511 node_id_2: if were_node_one { counterparty_node_id } else { node_id },
6512 bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
6513 bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
6514 excess_data: Vec::new(),
6520 fn get_announcement_sigs<NS: Deref, L: Deref>(
6521 &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
6522 best_block_height: u32, logger: &L
6523 ) -> Option<msgs::AnnouncementSignatures>
6525 NS::Target: NodeSigner,
6528 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
6532 if !self.context.is_usable() {
6536 if self.context.channel_state.is_peer_disconnected() {
6537 log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
6541 if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
6545 log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
6546 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
6549 log_trace!(logger, "{:?}", e);
6553 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
6555 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
6560 match &self.context.holder_signer {
6561 ChannelSignerType::Ecdsa(ecdsa) => {
6562 let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
6564 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
6569 let short_channel_id = match self.context.get_short_channel_id() {
6571 None => return None,
6574 self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
6576 Some(msgs::AnnouncementSignatures {
6577 channel_id: self.context.channel_id(),
6579 node_signature: our_node_sig,
6580 bitcoin_signature: our_bitcoin_sig,
6583 // TODO (taproot|arik)
6589 /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
6591 fn sign_channel_announcement<NS: Deref>(
6592 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
6593 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
6594 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
6595 let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
6596 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
6597 let were_node_one = announcement.node_id_1 == our_node_key;
6599 let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
6600 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
6601 match &self.context.holder_signer {
6602 ChannelSignerType::Ecdsa(ecdsa) => {
6603 let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
6604 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
6605 Ok(msgs::ChannelAnnouncement {
6606 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
6607 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
6608 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
6609 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
6610 contents: announcement,
6613 // TODO (taproot|arik)
6618 Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
6622 /// Processes an incoming announcement_signatures message, providing a fully-signed
6623 /// channel_announcement message which we can broadcast and storing our counterparty's
6624 /// signatures for later reconstruction/rebroadcast of the channel_announcement.
6625 pub fn announcement_signatures<NS: Deref>(
6626 &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
6627 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
6628 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
6629 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
6631 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
6633 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
6634 return Err(ChannelError::Close(format!(
6635 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
6636 &announcement, self.context.get_counterparty_node_id())));
6638 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
6639 return Err(ChannelError::Close(format!(
6640 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
6641 &announcement, self.context.counterparty_funding_pubkey())));
6644 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
6645 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
6646 return Err(ChannelError::Ignore(
6647 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
6650 self.sign_channel_announcement(node_signer, announcement)
6653 /// Gets a signed channel_announcement for this channel, if we previously received an
6654 /// announcement_signatures from our counterparty.
6655 pub fn get_signed_channel_announcement<NS: Deref>(
6656 &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
6657 ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
6658 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
6661 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
6663 Err(_) => return None,
6665 match self.sign_channel_announcement(node_signer, announcement) {
6666 Ok(res) => Some(res),
6671 /// May panic if called on a channel that wasn't immediately-previously
6672 /// self.remove_uncommitted_htlcs_and_mark_paused()'d
6673 pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
6674 assert!(self.context.channel_state.is_peer_disconnected());
6675 assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
6676 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
6677 // current to_remote balances. However, it no longer has any use, and thus is now simply
6678 // set to a dummy (but valid, as required by the spec) public key.
6679 // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
6680 // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
6681 // valid, and valid in fuzzing mode's arbitrary validity criteria:
6682 let mut pk = [2; 33]; pk[1] = 0xff;
6683 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
6684 let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
6685 let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
6686 log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
6689 log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
6692 self.mark_awaiting_response();
6693 msgs::ChannelReestablish {
6694 channel_id: self.context.channel_id(),
6695 // The protocol has two different commitment number concepts - the "commitment
6696 // transaction number", which starts from 0 and counts up, and the "revocation key
6697 // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
6698 // commitment transaction numbers by the index which will be used to reveal the
6699 // revocation key for that commitment transaction, which means we have to convert them
6700 // to protocol-level commitment numbers here...
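// As a purely illustrative example: commitment numbers count down from
// INITIAL_COMMITMENT_NUMBER (2^48 - 1), so a holder commitment transaction number that
// has been decremented twice maps to a protocol-level next_local_commitment_number of 2.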
6702 // next_local_commitment_number is the next commitment_signed number we expect to
6703 // receive (indicating if they need to resend one that we missed).
6704 next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
6705 // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
6706 // receive, however we track it by the next commitment number for a remote transaction
6707 // (which is one further, as they always revoke previous commitment transaction, not
6708 // the one we send) so we have to decrement by 1. Note that if
6709 // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
6710 // dropped this channel on disconnect as it hasn't yet reached AwaitingChannelReady so we can't
6712 next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
6713 your_last_per_commitment_secret: remote_last_secret,
6714 my_current_per_commitment_point: dummy_pubkey,
6715 // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
6716 // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
6717 // txid of that interactive transaction, else we MUST NOT set it.
6718 next_funding_txid: None,
6723 // Send stuff to our remote peers:
6725 /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
6726 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
6727 /// commitment update.
6729 /// `Err`s will only be [`ChannelError::Ignore`].
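///
/// A rough usage sketch (caller-side names are assumptions):
/// ```ignore
/// channel.queue_add_htlc(amount_msat, payment_hash, cltv_expiry, source,
///     onion_routing_packet, None /* skimmed_fee_msat */, None /* blinding_point */,
///     &fee_estimator, &logger)?;
/// // Later, once a commitment update can be generated, free the holding cell, e.g. via
/// // `maybe_free_holding_cell_htlcs`.
/// ```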
6730 pub fn queue_add_htlc<F: Deref, L: Deref>(
6731 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
6732 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
6733 blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
6734 ) -> Result<(), ChannelError>
6735 where F::Target: FeeEstimator, L::Target: Logger
6738 .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
6739 skimmed_fee_msat, blinding_point, fee_estimator, logger)
6740 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
6742 if let ChannelError::Ignore(_) = err { /* fine */ }
6743 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
6748 /// Adds a pending outbound HTLC to this channel. Note that you probably want
6749 /// [`Self::send_htlc_and_commit`] instead, as you'll want both messages at once.
6751 /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
6753 /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
6754 /// wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
6756 /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
6757 /// we may not yet have sent the previous commitment update messages and will need to
6758 /// regenerate them.
6760 /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
6761 /// on this [`Channel`] if `force_holding_cell` is false.
6763 /// `Err`s will only be [`ChannelError::Ignore`].
6764 fn send_htlc<F: Deref, L: Deref>(
6765 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
6766 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
6767 skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
6768 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
6769 ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
6770 where F::Target: FeeEstimator, L::Target: Logger
6772 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
6773 self.context.channel_state.is_local_shutdown_sent() ||
6774 self.context.channel_state.is_remote_shutdown_sent()
6776 return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
6778 let channel_total_msat = self.context.channel_value_satoshis * 1000;
6779 if amount_msat > channel_total_msat {
6780 return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
6783 if amount_msat == 0 {
6784 return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
6787 let available_balances = self.context.get_available_balances(fee_estimator);
6788 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
6789 return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
6790 available_balances.next_outbound_htlc_minimum_msat)));
6793 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
6794 return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
6795 available_balances.next_outbound_htlc_limit_msat)));
6798 if self.context.channel_state.is_peer_disconnected() {
6799 // Note that this should never really happen: being !is_live() on receipt of an
6800 // incoming HTLC for relay will result in us rejecting the HTLC, and we won't allow
6801 // the user to send directly into a !is_live() channel. However, if we
6802 // disconnected during the time the previous hop was doing the commitment dance we may
6803 // end up getting here after the forwarding delay. In any case, returning an
6804 // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
6805 return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
6808 let need_holding_cell = !self.context.channel_state.can_generate_new_commitment();
6809 log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
6810 payment_hash, amount_msat,
6811 if force_holding_cell { "into holding cell" }
6812 else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
6813 else { "to peer" });
6815 if need_holding_cell {
6816 force_holding_cell = true;
6819 // Now update local state:
6820 if force_holding_cell {
6821 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
6826 onion_routing_packet,
6833 self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
6834 htlc_id: self.context.next_holder_htlc_id,
6836 payment_hash: payment_hash.clone(),
6838 state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
6844 let res = msgs::UpdateAddHTLC {
6845 channel_id: self.context.channel_id,
6846 htlc_id: self.context.next_holder_htlc_id,
6850 onion_routing_packet,
6854 self.context.next_holder_htlc_id += 1;
6859 fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
6860 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
6861 // We can upgrade the status of some HTLCs that are waiting on a commitment; even if we
6862 // fail to generate this, we are still at least at a position where upgrading their status
6864 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
6865 let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
6866 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
6868 if let Some(state) = new_state {
6869 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
6873 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
6874 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
6875 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
6876 // Grab the preimage, if it exists, instead of cloning
6877 let mut reason = OutboundHTLCOutcome::Success(None);
6878 mem::swap(outcome, &mut reason);
6879 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
6882 if let Some((feerate, update_state)) = self.context.pending_update_fee {
6883 if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
6884 debug_assert!(!self.context.is_outbound());
6885 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
6886 self.context.feerate_per_kw = feerate;
6887 self.context.pending_update_fee = None;
6890 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
6892 let (mut htlcs_ref, counterparty_commitment_tx) =
6893 self.build_commitment_no_state_update(logger);
6894 let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
6895 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
6896 htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
6898 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
6899 self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
6902 self.context.latest_monitor_update_id += 1;
6903 let monitor_update = ChannelMonitorUpdate {
6904 update_id: self.context.latest_monitor_update_id,
6905 counterparty_node_id: Some(self.context.counterparty_node_id),
6906 updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
6907 commitment_txid: counterparty_commitment_txid,
6908 htlc_outputs: htlcs.clone(),
6909 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
6910 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
6911 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
6912 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
6913 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
6915 channel_id: Some(self.context.channel_id()),
6917 self.context.channel_state.set_awaiting_remote_revoke();
6921 fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
6922 -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
6923 where L::Target: Logger
6925 let counterparty_keys = self.context.build_remote_transaction_keys();
6926 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
6927 let counterparty_commitment_tx = commitment_stats.tx;
6929 #[cfg(any(test, fuzzing))]
6931 if !self.context.is_outbound() {
6932 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
6933 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
6934 if let Some(info) = projected_commit_tx_info {
6935 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
6936 if info.total_pending_htlcs == total_pending_htlcs
6937 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
6938 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
6939 && info.feerate == self.context.feerate_per_kw {
6940 let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
6941 assert_eq!(actual_fee, info.fee);
6947 (commitment_stats.htlcs_included, counterparty_commitment_tx)
6950 /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
6951 /// generation when we shouldn't change HTLC/channel state.
6952 fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
6953 // Get the fee tests from `build_commitment_no_state_update`
6954 #[cfg(any(test, fuzzing))]
6955 self.build_commitment_no_state_update(logger);
6957 let counterparty_keys = self.context.build_remote_transaction_keys();
6958 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
6959 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
6961 match &self.context.holder_signer {
6962 ChannelSignerType::Ecdsa(ecdsa) => {
6963 let (signature, htlc_signatures);
6966 let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
6967 for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
6971 let res = ecdsa.sign_counterparty_commitment(
6972 &commitment_stats.tx,
6973 commitment_stats.inbound_htlc_preimages,
6974 commitment_stats.outbound_htlc_preimages,
6975 &self.context.secp_ctx,
6976 ).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
6978 htlc_signatures = res.1;
6980 log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
6981 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
6982 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
6983 log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
6985 for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
6986 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
6987 encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
6988 encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
6989 log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
6990 log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
6994 Ok((msgs::CommitmentSigned {
6995 channel_id: self.context.channel_id,
6999 partial_signature_with_nonce: None,
7000 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
7002 // TODO (taproot|arik)
7008 /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
7009 /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
7011 /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update; see docs on
7012 /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
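///
/// A rough usage sketch (caller-side names are assumptions):
/// ```ignore
/// if let Some(monitor_update) = channel.send_htlc_and_commit(amount_msat, payment_hash,
///         cltv_expiry, source, onion_routing_packet, None /* skimmed_fee_msat */,
///         &fee_estimator, &logger)? {
///     // Persist `monitor_update` before releasing any resulting messages to the peer.
/// }
/// ```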
7013 pub fn send_htlc_and_commit<F: Deref, L: Deref>(
7014 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
7015 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
7016 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
7017 ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
7018 where F::Target: FeeEstimator, L::Target: Logger
7020 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
7021 onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
7022 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
7025 let monitor_update = self.build_commitment_no_status_check(logger);
7026 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
7027 Ok(self.push_ret_blockable_mon_update(monitor_update))
7033 /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
7035 pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
7036 let new_forwarding_info = Some(CounterpartyForwardingInfo {
7037 fee_base_msat: msg.contents.fee_base_msat,
7038 fee_proportional_millionths: msg.contents.fee_proportional_millionths,
7039 cltv_expiry_delta: msg.contents.cltv_expiry_delta
7041 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
7043 self.context.counterparty_forwarding_info = new_forwarding_info;
7049 /// Begins the shutdown process, getting a message for the remote peer and returning all
7050 /// holding cell HTLCs for payment failure.
7051 pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
7052 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
7053 -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
7055 for htlc in self.context.pending_outbound_htlcs.iter() {
7056 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
7057 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
7060 if self.context.channel_state.is_local_shutdown_sent() {
7061 return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
7063 else if self.context.channel_state.is_remote_shutdown_sent() {
7064 return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
7066 if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
7067 return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
7069 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
7070 if self.context.channel_state.is_peer_disconnected() || self.context.channel_state.is_monitor_update_in_progress() {
7071 return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
7074 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
7077 // use override shutdown script if provided
7078 let shutdown_scriptpubkey = match override_shutdown_script {
7079 Some(script) => script,
7081 // otherwise, use the shutdown scriptpubkey provided by the signer
7082 match signer_provider.get_shutdown_scriptpubkey() {
7083 Ok(scriptpubkey) => scriptpubkey,
7084 Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
7088 if !shutdown_scriptpubkey.is_compatible(their_features) {
7089 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
7091 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
7096 // From here on out, we may not fail!
7097 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
7098 self.context.channel_state.set_local_shutdown_sent();
7099 self.context.local_initiated_shutdown = Some(());
7100 self.context.update_time_counter += 1;
7102 let monitor_update = if update_shutdown_script {
7103 self.context.latest_monitor_update_id += 1;
7104 let monitor_update = ChannelMonitorUpdate {
7105 update_id: self.context.latest_monitor_update_id,
7106 counterparty_node_id: Some(self.context.counterparty_node_id),
7107 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
7108 scriptpubkey: self.get_closing_scriptpubkey(),
7110 channel_id: Some(self.context.channel_id()),
7112 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
7113 self.push_ret_blockable_mon_update(monitor_update)
7115 let shutdown = msgs::Shutdown {
7116 channel_id: self.context.channel_id,
7117 scriptpubkey: self.get_closing_scriptpubkey(),
7120 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
7121 // our shutdown until we've committed all of the pending changes.
7122 self.context.holding_cell_update_fee = None;
7123 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
7124 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
7126 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
7127 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
7134 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
7135 "we can't both complete shutdown and return a monitor update");
7137 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
7140 pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
7141 self.context.holding_cell_htlc_updates.iter()
7142 .flat_map(|htlc_update| {
7144 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
7145 => Some((source, payment_hash)),
7149 .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
7153 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
7154 pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
7155 pub context: ChannelContext<SP>,
7156 pub unfunded_context: UnfundedChannelContext,
7159 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
7160 pub fn new<ES: Deref, F: Deref>(
7161 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
7162 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
7163 outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
7164 ) -> Result<OutboundV1Channel<SP>, APIError>
7165 where ES::Target: EntropySource,
7166 F::Target: FeeEstimator
7168 let channel_type = Self::get_initial_channel_type(&config, their_features);
7171 context: ChannelContext::new_for_outbound_channel(
7175 counterparty_node_id,
7177 channel_value_satoshis,
7181 current_chain_height,
7182 outbound_scid_alias,
7183 temporary_channel_id,
7186 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
7191 /// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
7192 fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
7193 let counterparty_keys = self.context.build_remote_transaction_keys();
7194 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
7195 let signature = match &self.context.holder_signer {
7196 // TODO (taproot|arik): move match into calling method for Taproot
7197 ChannelSignerType::Ecdsa(ecdsa) => {
7198 ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
7199 .map(|(sig, _)| sig).ok()?
7201 // TODO (taproot|arik)
7206 if self.context.signer_pending_funding {
7207 log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
7208 self.context.signer_pending_funding = false;
7211 Some(msgs::FundingCreated {
7212 temporary_channel_id: self.context.temporary_channel_id.unwrap(),
7213 funding_txid: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
7214 funding_output_index: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
7217 partial_signature_with_nonce: None,
7219 next_local_nonce: None,
7223 /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
7224 /// a funding_created message for the remote peer.
7225 /// Panics if called at some time other than immediately after initial handshake, if called twice,
7226 /// or if called on an inbound channel.
7227 /// Note that channel_id changes during this call!
7228 /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
7229 /// If an Err is returned, it is a ChannelError::Close.
7230 pub fn get_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
7231 -> Result<Option<msgs::FundingCreated>, (Self, ChannelError)> where L::Target: Logger {
7232 if !self.context.is_outbound() {
7233 panic!("Tried to create outbound funding_created message on an inbound channel!");
7236 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7237 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7239 panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
7241 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7242 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7243 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7244 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7247 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
7248 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
7250 // Now that we're past error-generating stuff, update our local state:
7252 self.context.channel_state = ChannelState::FundingNegotiated;
7253 self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
7255 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
7256 // We can skip this if it is a zero-conf channel.
7257 if funding_transaction.is_coin_base() &&
7258 self.context.minimum_depth.unwrap_or(0) > 0 &&
7259 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
7260 self.context.minimum_depth = Some(COINBASE_MATURITY);
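// Coinbase outputs are only spendable after COINBASE_MATURITY (100) confirmations, so a
// smaller negotiated minimum_depth would let the channel open before the funding output is
// actually spendable.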
7263 self.context.funding_transaction = Some(funding_transaction);
7264 self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
7266 let funding_created = self.get_funding_created_msg(logger);
7267 if funding_created.is_none() {
7268 #[cfg(not(async_signing))] {
7269 panic!("Failed to get signature for new funding creation");
7271 #[cfg(async_signing)] {
7272 if !self.context.signer_pending_funding {
7273 log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
7274 self.context.signer_pending_funding = true;
7282 fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
7283 // The default channel type (ie the first one we try) depends on whether the channel is
7284 // public - if it is, we just go with `only_static_remotekey` as it's the only option
7285 // available. If it's private, we first try `scid_privacy` as it provides better privacy
7286 // with no other changes, and fall back to `only_static_remotekey`.
7287 let mut ret = ChannelTypeFeatures::only_static_remote_key();
7288 if !config.channel_handshake_config.announced_channel &&
7289 config.channel_handshake_config.negotiate_scid_privacy &&
7290 their_features.supports_scid_privacy() {
7291 ret.set_scid_privacy_required();
7294 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
7295 // set it now. If they don't understand it, we'll fall back to our default of
7296 // `only_static_remotekey`.
7297 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
7298 their_features.supports_anchors_zero_fee_htlc_tx() {
7299 ret.set_anchors_zero_fee_htlc_tx_required();
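// Net result (illustrative): we propose anchors_zero_fee_htlc_tx and/or scid_privacy when
// supported and enabled, and `maybe_handle_error_without_close` below strips features back
// towards `only_static_remote_key` if the peer rejects the proposal.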
7305 /// If we receive an error message, it may only be a rejection of the channel type we tried,
7306 /// not of our ability to open any channel at all. Thus, on error, we should first call this
7307 /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
7308 pub(crate) fn maybe_handle_error_without_close<F: Deref>(
7309 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
7310 ) -> Result<msgs::OpenChannel, ()>
7312 F::Target: FeeEstimator
7314 if !self.context.is_outbound() ||
7316 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7317 if flags == NegotiatingFundingFlags::OUR_INIT_SENT
7322 if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
7323 // We've exhausted our options
7326 // We support opening a few different types of channels. Try removing our additional
7327 // features one by one until we've either arrived at our default or the counterparty has accepted something.
7330 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
7331 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
7332 // checks whether the counterparty supports every feature, this would only happen if the
7333 // counterparty is advertising the feature, but rejecting channels proposing the feature for no reason.
7335 if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
7336 self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
7337 self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
7338 assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
7339 } else if self.context.channel_type.supports_scid_privacy() {
7340 self.context.channel_type.clear_scid_privacy();
7342 self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
7344 self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
7345 Ok(self.get_open_channel(chain_hash))
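// Illustrative usage: each time the peer rejects the pending open with an error message, this
// clears at most one extra feature and returns a fresh `open_channel` to retry with; once only
// `only_static_remote_key` remains, Err(()) is returned above and the channel should be failed.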
7348 pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
7349 if !self.context.is_outbound() {
7350 panic!("Tried to open a channel for an inbound channel?");
7352 if self.context.have_received_message() {
7353 panic!("Cannot generate an open_channel after we've moved forward");
7356 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7357 panic!("Tried to send an open_channel for a channel that has already advanced");
7360 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
7361 let keys = self.context.get_holder_pubkeys();
7364 common_fields: msgs::CommonOpenChannelFields {
7366 temporary_channel_id: self.context.channel_id,
7367 funding_satoshis: self.context.channel_value_satoshis,
7368 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
7369 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
7370 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
7371 commitment_feerate_sat_per_1000_weight: self.context.feerate_per_kw as u32,
7372 to_self_delay: self.context.get_holder_selected_contest_delay(),
7373 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
7374 funding_pubkey: keys.funding_pubkey,
7375 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
7376 payment_basepoint: keys.payment_point,
7377 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
7378 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
7379 first_per_commitment_point,
7380 channel_flags: if self.context.config.announced_channel {1} else {0},
7381 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
7382 Some(script) => script.clone().into_inner(),
7383 None => Builder::new().into_script(),
7385 channel_type: Some(self.context.channel_type.clone()),
7387 push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
7388 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
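// push_msat above is simply the portion of the channel value we give to the peer at open:
// the total value (in msat) minus what we keep for ourselves (value_to_self_msat).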
7393 pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
7394 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
7396 // Check sanity of message fields:
7397 if !self.context.is_outbound() {
7398 return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
7400 if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
7401 return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
7403 if msg.common_fields.dust_limit_satoshis > 21000000 * 100000000 {
7404 return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.common_fields.dust_limit_satoshis)));
7406 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
7407 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
7409 if msg.common_fields.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
7410 return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.common_fields.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
7412 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
7413 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
7414 msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
7416 let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
7417 if msg.common_fields.htlc_minimum_msat >= full_channel_value_msat {
7418 return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.common_fields.htlc_minimum_msat, full_channel_value_msat)));
7420 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
7421 if msg.common_fields.to_self_delay > max_delay_acceptable {
7422 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.common_fields.to_self_delay)));
7424 if msg.common_fields.max_accepted_htlcs < 1 {
7425 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
7427 if msg.common_fields.max_accepted_htlcs > MAX_HTLCS {
7428 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.common_fields.max_accepted_htlcs, MAX_HTLCS)));
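// (MAX_HTLCS is the BOLT 2 limit of 483 HTLCs per direction, chosen so that commitment_signed
// always fits within the maximum message size.)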
7431 // Now check against optional parameters as set by config...
7432 if msg.common_fields.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
7433 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.common_fields.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
7435 if msg.common_fields.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
7436 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.common_fields.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
7438 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
7439 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
7441 if msg.common_fields.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
7442 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.common_fields.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
7444 if msg.common_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
7445 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
7447 if msg.common_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
7448 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
7450 if msg.common_fields.minimum_depth > peer_limits.max_minimum_depth {
7451 return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.common_fields.minimum_depth)));
7454 if let Some(ty) = &msg.common_fields.channel_type {
7455 if *ty != self.context.channel_type {
7456 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
7458 } else if their_features.supports_channel_type() {
7459 // Assume they've accepted the channel type as they said they understand it.
7461 let channel_type = ChannelTypeFeatures::from_init(&their_features);
7462 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
7463 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
7465 self.context.channel_type = channel_type.clone();
7466 self.context.channel_transaction_parameters.channel_type_features = channel_type;
7469 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
7470 match &msg.common_fields.shutdown_scriptpubkey {
7471 &Some(ref script) => {
7472 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
7473 if script.len() == 0 {
7476 if !script::is_bolt2_compliant(&script, their_features) {
7477 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
7479 Some(script.clone())
7482 // Peer is signaling upfront_shutdown but didn't opt out with the correct mechanism (i.e. a 0-length script). Peer looks buggy, so we fail the channel
7484 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
7489 self.context.counterparty_dust_limit_satoshis = msg.common_fields.dust_limit_satoshis;
7490 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.common_fields.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
7491 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
7492 self.context.counterparty_htlc_minimum_msat = msg.common_fields.htlc_minimum_msat;
7493 self.context.counterparty_max_accepted_htlcs = msg.common_fields.max_accepted_htlcs;
7495 if peer_limits.trust_own_funding_0conf {
7496 self.context.minimum_depth = Some(msg.common_fields.minimum_depth);
7498 self.context.minimum_depth = Some(cmp::max(1, msg.common_fields.minimum_depth));
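// With `trust_own_funding_0conf` we take the peer's minimum_depth as-is (possibly 0 for a
// 0-conf channel we funded); otherwise we insist on at least one confirmation.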
7501 let counterparty_pubkeys = ChannelPublicKeys {
7502 funding_pubkey: msg.common_fields.funding_pubkey,
7503 revocation_basepoint: RevocationBasepoint::from(msg.common_fields.revocation_basepoint),
7504 payment_point: msg.common_fields.payment_basepoint,
7505 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.common_fields.delayed_payment_basepoint),
7506 htlc_basepoint: HtlcBasepoint::from(msg.common_fields.htlc_basepoint)
7509 self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
7510 selected_contest_delay: msg.common_fields.to_self_delay,
7511 pubkeys: counterparty_pubkeys,
7514 self.context.counterparty_cur_commitment_point = Some(msg.common_fields.first_per_commitment_point);
7515 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
7517 self.context.channel_state = ChannelState::NegotiatingFunding(
7518 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
7520 self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
7525 /// Handles a funding_signed message from the remote end.
7526 /// If this call is successful, broadcast the funding transaction (and not before!)
7527 pub fn funding_signed<L: Deref>(
7528 mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
7529 ) -> Result<(Channel<SP>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (OutboundV1Channel<SP>, ChannelError)>
7533 if !self.context.is_outbound() {
7534 return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
7536 if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
7537 return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
7539 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7540 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7541 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7542 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7545 let funding_script = self.context.get_funding_redeemscript();
7547 let counterparty_keys = self.context.build_remote_transaction_keys();
7548 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
7549 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
7550 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
7552 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
7553 &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
7555 let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
7556 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
7558 let trusted_tx = initial_commitment_tx.trust();
7559 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
7560 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
7561 // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
7562 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
7563 return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));
7567 let holder_commitment_tx = HolderCommitmentTransaction::new(
7568 initial_commitment_tx,
7571 &self.context.get_holder_pubkeys().funding_pubkey,
7572 self.context.counterparty_funding_pubkey()
7576 let validated = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
7577 if validated.is_err() {
7578 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
7581 let funding_redeemscript = self.context.get_funding_redeemscript();
7582 let funding_txo = self.context.get_funding_txo().unwrap();
7583 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
7584 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
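// Per BOLT 3, commitment numbers in the locktime/sequence fields are obscured by XORing them
// with the lower 48 bits of SHA256(opener_payment_basepoint || acceptor_payment_basepoint).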
7585 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
7586 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
7587 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
7588 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
7589 shutdown_script, self.context.get_holder_selected_contest_delay(),
7590 &self.context.destination_script, (funding_txo, funding_txo_script),
7591 &self.context.channel_transaction_parameters,
7592 funding_redeemscript.clone(), self.context.channel_value_satoshis,
7594 holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id());
7595 channel_monitor.provide_initial_counterparty_commitment_tx(
7596 counterparty_initial_bitcoin_tx.txid, Vec::new(),
7597 self.context.cur_counterparty_commitment_transaction_number,
7598 self.context.counterparty_cur_commitment_point.unwrap(),
7599 counterparty_initial_commitment_tx.feerate_per_kw(),
7600 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7601 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
7603 assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have not had any monitor(s) yet to fail an update!
7604 if self.context.is_batch_funding() {
7605 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
7607 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
7609 self.context.cur_holder_commitment_transaction_number -= 1;
7610 self.context.cur_counterparty_commitment_transaction_number -= 1;
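// Commitment numbers count down from INITIAL_COMMITMENT_NUMBER; having exchanged the initial
// commitment transactions, both sides now move on to the next (lower) commitment number.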
7612 log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
7614 let mut channel = Channel {
7615 context: self.context,
7616 #[cfg(dual_funding)]
7617 dual_funding_channel_context: None,
7620 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
7621 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7622 Ok((channel, channel_monitor))
7625 /// Indicates that the signer may have some signatures for us, so we should retry if we're stuck.
7627 #[cfg(async_signing)]
7628 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
7629 if self.context.signer_pending_funding && self.context.is_outbound() {
7630 log_trace!(logger, "Signer unblocked a funding_created");
7631 self.get_funding_created_msg(logger)
7636 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
7637 pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
7638 pub context: ChannelContext<SP>,
7639 pub unfunded_context: UnfundedChannelContext,
7642 /// Fetches the [`ChannelTypeFeatures`] that will be used for a channel built from a given
7643 /// [`msgs::CommonOpenChannelFields`].
7644 pub(super) fn channel_type_from_open_channel(
7645 common_fields: &msgs::CommonOpenChannelFields, their_features: &InitFeatures,
7646 our_supported_features: &ChannelTypeFeatures
7647 ) -> Result<ChannelTypeFeatures, ChannelError> {
7648 if let Some(channel_type) = &common_fields.channel_type {
7649 if channel_type.supports_any_optional_bits() {
7650 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
7653 // We only support the channel types defined by the `ChannelManager` in
7654 // `provided_channel_type_features`. The channel type must always support
7655 // `static_remote_key`.
7656 if !channel_type.requires_static_remote_key() {
7657 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
7659 // Make sure we support all of the features behind the channel type.
7660 if !channel_type.is_subset(our_supported_features) {
7661 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
7663 let announced_channel = (common_fields.channel_flags & 1) == 1;
7664 if channel_type.requires_scid_privacy() && announced_channel {
7665 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
7667 Ok(channel_type.clone())
7669 let channel_type = ChannelTypeFeatures::from_init(&their_features);
7670 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
7671 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
7677 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
7678 /// Creates a new channel from a remote side's request for one.
7679 /// Assumes chain_hash has already been checked and corresponds with what we expect!
7680 pub fn new<ES: Deref, F: Deref, L: Deref>(
7681 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
7682 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
7683 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
7684 current_chain_height: u32, logger: &L, is_0conf: bool,
7685 ) -> Result<InboundV1Channel<SP>, ChannelError>
7686 where ES::Target: EntropySource,
7687 F::Target: FeeEstimator,
7690 let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.common_fields.temporary_channel_id));
7692 // First check the channel type is known, failing before we do anything else if we don't
7693 // support this channel type.
7694 let channel_type = channel_type_from_open_channel(&msg.common_fields, their_features, our_supported_features)?;
7696 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.common_fields.funding_satoshis, config);
7697 let counterparty_pubkeys = ChannelPublicKeys {
7698 funding_pubkey: msg.common_fields.funding_pubkey,
7699 revocation_basepoint: RevocationBasepoint::from(msg.common_fields.revocation_basepoint),
7700 payment_point: msg.common_fields.payment_basepoint,
7701 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.common_fields.delayed_payment_basepoint),
7702 htlc_basepoint: HtlcBasepoint::from(msg.common_fields.htlc_basepoint)
7706 context: ChannelContext::new_for_inbound_channel(
7710 counterparty_node_id,
7714 current_chain_height,
7719 counterparty_pubkeys,
7721 holder_selected_channel_reserve_satoshis,
7722 msg.channel_reserve_satoshis,
7724 msg.common_fields.clone(),
7726 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
7731 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
7732 /// should be sent back to the counterparty node.
7734 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7735 pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
7736 if self.context.is_outbound() {
7737 panic!("Tried to send accept_channel for an outbound channel?");
7740 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7741 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7743 panic!("Tried to send accept_channel after channel had moved forward");
7745 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7746 panic!("Tried to send an accept_channel for a channel that has already advanced");
7749 self.generate_accept_channel_message()
7752 /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
7753 /// inbound channel. If the intention is to accept an inbound channel, use
7754 /// [`InboundV1Channel::accept_inbound_channel`] instead.
7756 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7757 fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
7758 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
7759 let keys = self.context.get_holder_pubkeys();
7761 msgs::AcceptChannel {
7762 common_fields: msgs::CommonAcceptChannelFields {
7763 temporary_channel_id: self.context.channel_id,
7764 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
7765 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
7766 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
7767 minimum_depth: self.context.minimum_depth.unwrap(),
7768 to_self_delay: self.context.get_holder_selected_contest_delay(),
7769 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
7770 funding_pubkey: keys.funding_pubkey,
7771 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
7772 payment_basepoint: keys.payment_point,
7773 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
7774 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
7775 first_per_commitment_point,
7776 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
7777 Some(script) => script.clone().into_inner(),
7778 None => Builder::new().into_script(),
7780 channel_type: Some(self.context.channel_type.clone()),
7782 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
7784 next_local_nonce: None,
7788 /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
7789 /// inbound channel without accepting it.
7791 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7793 pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
7794 self.generate_accept_channel_message()
7797 fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
7798 let funding_script = self.context.get_funding_redeemscript();
7800 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
7801 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
7802 let trusted_tx = initial_commitment_tx.trust();
7803 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
7804 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
7805 // They sign the holder commitment transaction...
7806 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
7807 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
7808 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
7809 encode::serialize_hex(&funding_script), &self.context.channel_id());
7810 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
7812 Ok(initial_commitment_tx)
7815 pub fn funding_created<L: Deref>(
7816 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
7817 ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
7821 if self.context.is_outbound() {
7822 return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
7825 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7826 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7828 // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
7829 // remember the channel, so it's safe to just send an error_message here and drop the channel.
7831 return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
7833 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7834 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7835 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7836 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7839 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
7840 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
7841 // This is an externally observable change before we finish all our checks. In particular
7842 // check_funding_created_signature may fail.
7843 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
7845 let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
7847 Err(ChannelError::Close(e)) => {
7848 self.context.channel_transaction_parameters.funding_outpoint = None;
7849 return Err((self, ChannelError::Close(e)));
7852 // The only error we know how to handle is ChannelError::Close, so we fall over here
7853 // to make sure we don't continue with an inconsistent state.
7854 panic!("unexpected error type from check_funding_created_signature {:?}", e);
7858 let holder_commitment_tx = HolderCommitmentTransaction::new(
7859 initial_commitment_tx,
7862 &self.context.get_holder_pubkeys().funding_pubkey,
7863 self.context.counterparty_funding_pubkey()
7866 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
7867 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
7870 // Now that we're past error-generating stuff, update our local state:
7872 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
7873 self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
7874 self.context.cur_counterparty_commitment_transaction_number -= 1;
7875 self.context.cur_holder_commitment_transaction_number -= 1;
7877 let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
7879 let funding_redeemscript = self.context.get_funding_redeemscript();
7880 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
7881 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
7882 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
7883 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
7884 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
7885 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
7886 shutdown_script, self.context.get_holder_selected_contest_delay(),
7887 &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
7888 &self.context.channel_transaction_parameters,
7889 funding_redeemscript.clone(), self.context.channel_value_satoshis,
7891 holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id());
7892 channel_monitor.provide_initial_counterparty_commitment_tx(
7893 counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
7894 self.context.cur_counterparty_commitment_transaction_number + 1,
7895 self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
7896 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7897 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
7899 log_info!(logger, "{} funding_signed for peer for channel {}",
7900 if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
7902 // Promote the channel to a full-fledged one now that we have updated the state and have a
7903 // `ChannelMonitor`.
7904 let mut channel = Channel {
7905 context: self.context,
7906 #[cfg(dual_funding)]
7907 dual_funding_channel_context: None,
7909 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
7910 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7912 Ok((channel, funding_signed, channel_monitor))
7916 // A not-yet-funded inbound (from counterparty) channel using V2 channel establishment.
7917 #[cfg(dual_funding)]
7918 pub(super) struct InboundV2Channel<SP: Deref> where SP::Target: SignerProvider {
7919 pub context: ChannelContext<SP>,
7920 pub unfunded_context: UnfundedChannelContext,
7921 pub dual_funding_context: DualFundingChannelContext,
7924 #[cfg(dual_funding)]
7925 impl<SP: Deref> InboundV2Channel<SP> where SP::Target: SignerProvider {
7926 /// Creates a new dual-funded channel from a remote side's request for one.
7927 /// Assumes chain_hash has already been checked and corresponds with what we expect!
7928 pub fn new<ES: Deref, F: Deref, L: Deref>(
7929 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
7930 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
7931 their_features: &InitFeatures, msg: &msgs::OpenChannelV2, funding_satoshis: u64, user_id: u128,
7932 config: &UserConfig, current_chain_height: u32, logger: &L,
7933 ) -> Result<InboundV2Channel<SP>, ChannelError>
7934 where ES::Target: EntropySource,
7935 F::Target: FeeEstimator,
7938 let channel_value_satoshis = funding_satoshis.saturating_add(msg.common_fields.funding_satoshis);
7939 let counterparty_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis(
7940 channel_value_satoshis, msg.common_fields.dust_limit_satoshis);
7941 let holder_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis(
7942 channel_value_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
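// Unlike V1, neither side sends an explicit reserve in dual funding: each side's reserve is
// derived from the total channel value (with the relevant dust limit as an input) via
// get_v2_channel_reserve_satoshis.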
7944 // First check the channel type is known, failing before we do anything else if we don't
7945 // support this channel type.
7946 if msg.common_fields.channel_type.is_none() {
7947 return Err(ChannelError::Close(format!("Rejecting V2 channel {} missing channel_type",
7948 msg.common_fields.temporary_channel_id)))
7950 let channel_type = channel_type_from_open_channel(&msg.common_fields, their_features, our_supported_features)?;
7952 let counterparty_pubkeys = ChannelPublicKeys {
7953 funding_pubkey: msg.common_fields.funding_pubkey,
7954 revocation_basepoint: RevocationBasepoint(msg.common_fields.revocation_basepoint),
7955 payment_point: msg.common_fields.payment_basepoint,
7956 delayed_payment_basepoint: DelayedPaymentBasepoint(msg.common_fields.delayed_payment_basepoint),
7957 htlc_basepoint: HtlcBasepoint(msg.common_fields.htlc_basepoint)
7960 let mut context = ChannelContext::new_for_inbound_channel(
7964 counterparty_node_id,
7968 current_chain_height,
7974 counterparty_pubkeys,
7976 holder_selected_channel_reserve_satoshis,
7977 counterparty_selected_channel_reserve_satoshis,
7978 0 /* push_msat not used in dual-funding */,
7979 msg.common_fields.clone(),
7981 let channel_id = ChannelId::v2_from_revocation_basepoints(
7982 &context.get_holder_pubkeys().revocation_basepoint,
7983 &context.get_counterparty_pubkeys().revocation_basepoint);
7984 context.channel_id = channel_id;
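// The V2 channel_id is derived from both sides' revocation basepoints (see
// ChannelId::v2_from_revocation_basepoints), so both peers can agree on it before any funding
// outpoint exists.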
7988 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 },
7989 dual_funding_context: DualFundingChannelContext {
7990 our_funding_satoshis: funding_satoshis,
7991 their_funding_satoshis: msg.common_fields.funding_satoshis,
7992 funding_tx_locktime: msg.locktime,
7993 funding_feerate_sat_per_1000_weight: msg.funding_feerate_sat_per_1000_weight,
8000 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannelV2`] message which
8001 /// should be sent back to the counterparty node.
8003 /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2
8004 pub fn accept_inbound_dual_funded_channel(&mut self) -> msgs::AcceptChannelV2 {
8005 if self.context.is_outbound() {
8006 debug_assert!(false, "Tried to send accept_channel for an outbound channel?");
8009 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
8010 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
8012 debug_assert!(false, "Tried to send accept_channel2 after channel had moved forward");
8014 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
8015 debug_assert!(false, "Tried to send an accept_channel2 for a channel that has already advanced");
8018 self.generate_accept_channel_v2_message()
8021 /// This function is used to explicitly generate a [`msgs::AcceptChannelV2`] message for an
8022 /// inbound channel. If the intention is to accept an inbound channel, use
8023 /// [`InboundV2Channel::accept_inbound_dual_funded_channel`] instead.
8025 /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2
8026 fn generate_accept_channel_v2_message(&self) -> msgs::AcceptChannelV2 {
8027 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(
8028 self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
8029 let second_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(
8030 self.context.cur_holder_commitment_transaction_number - 1, &self.context.secp_ctx);
8031 let keys = self.context.get_holder_pubkeys();
8033 msgs::AcceptChannelV2 {
8034 common_fields: msgs::CommonAcceptChannelFields {
8035 temporary_channel_id: self.context.temporary_channel_id.unwrap(),
8036 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
8037 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
8038 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
8039 minimum_depth: self.context.minimum_depth.unwrap(),
8040 to_self_delay: self.context.get_holder_selected_contest_delay(),
8041 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
8042 funding_pubkey: keys.funding_pubkey,
8043 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
8044 payment_basepoint: keys.payment_point,
8045 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
8046 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
8047 first_per_commitment_point,
8048 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
8049 Some(script) => script.clone().into_inner(),
8050 None => Builder::new().into_script(),
8052 channel_type: Some(self.context.channel_type.clone()),
8054 funding_satoshis: self.dual_funding_context.our_funding_satoshis,
8055 second_per_commitment_point,
8056 require_confirmed_inputs: None,
8060 /// Enables the possibility for tests to extract a [`msgs::AcceptChannelV2`] message for an
8061 /// inbound channel without accepting it.
8063 /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2
8065 pub fn get_accept_channel_v2_message(&self) -> msgs::AcceptChannelV2 {
8066 self.generate_accept_channel_v2_message()
8070 const SERIALIZATION_VERSION: u8 = 3;
8071 const MIN_SERIALIZATION_VERSION: u8 = 3;
8073 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
8079 impl Writeable for ChannelUpdateStatus {
8080 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
8081 // We only care about writing out the current state as it was announced, ie only either
8082 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
8083 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
8085 ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
8086 ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
8087 ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
8088 ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
8094 impl Readable for ChannelUpdateStatus {
8095 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
8096 Ok(match <u8 as Readable>::read(reader)? {
8097 0 => ChannelUpdateStatus::Enabled,
8098 1 => ChannelUpdateStatus::Disabled,
8099 _ => return Err(DecodeError::InvalidValue),
8104 impl Writeable for AnnouncementSigsState {
8105 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
8106 // We only care about writing out the current state as if we had just disconnected, at
8107 // which point we always set anything but PeerReceived to NotSent.
8109 AnnouncementSigsState::NotSent => 0u8.write(writer),
8110 AnnouncementSigsState::MessageSent => 0u8.write(writer),
8111 AnnouncementSigsState::Committed => 0u8.write(writer),
8112 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
8117 impl Readable for AnnouncementSigsState {
8118 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
8119 Ok(match <u8 as Readable>::read(reader)? {
8120 0 => AnnouncementSigsState::NotSent,
8121 1 => AnnouncementSigsState::PeerReceived,
8122 _ => return Err(DecodeError::InvalidValue),
8127 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
8128 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
8129 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been called.
8132 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
8134 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
8135 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
8136 // the low bytes now and the optional high bytes later.
8137 let user_id_low = self.context.user_id as u64;
8138 user_id_low.write(writer)?;
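// e.g. for user_id = ((high as u128) << 64) | (low as u128), we write `low` here and `high`
// later as the `user_id_high_opt` TLV; readers reconstruct the u128 from the two halves.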
8140 // Version 1 deserializers expected to read parts of the config object here. Version 2
8141 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
8142 // `minimum_depth` we simply write dummy values here.
8143 writer.write_all(&[0; 8])?;
8145 self.context.channel_id.write(writer)?;
8147 let mut channel_state = self.context.channel_state;
8148 if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
8149 channel_state.set_peer_disconnected();
8151 debug_assert!(false, "Pre-funded/shutdown channels should not be written");
8153 channel_state.to_u32().write(writer)?;
8155 self.context.channel_value_satoshis.write(writer)?;
8157 self.context.latest_monitor_update_id.write(writer)?;
8159 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
8160 // deserialized from that format.
8161 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
8162 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
8163 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
8165 self.context.destination_script.write(writer)?;
8167 self.context.cur_holder_commitment_transaction_number.write(writer)?;
8168 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
8169 self.context.value_to_self_msat.write(writer)?;
8171 let mut dropped_inbound_htlcs = 0;
8172 for htlc in self.context.pending_inbound_htlcs.iter() {
8173 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
8174 dropped_inbound_htlcs += 1;
8177 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
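// RemoteAnnounced HTLCs were never committed to by us; the peer will re-announce them on
// reconnect, so we skip them here (and shrink next_counterparty_htlc_id to match below).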
8178 for htlc in self.context.pending_inbound_htlcs.iter() {
8179 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
8182 htlc.htlc_id.write(writer)?;
8183 htlc.amount_msat.write(writer)?;
8184 htlc.cltv_expiry.write(writer)?;
8185 htlc.payment_hash.write(writer)?;
8187 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
8188 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
8190 htlc_state.write(writer)?;
8192 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
8194 htlc_state.write(writer)?;
8196 &InboundHTLCState::Committed => {
8199 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
8201 removal_reason.write(writer)?;
8206 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
8207 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
8208 let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
8210 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
8211 for htlc in self.context.pending_outbound_htlcs.iter() {
8212 htlc.htlc_id.write(writer)?;
8213 htlc.amount_msat.write(writer)?;
8214 htlc.cltv_expiry.write(writer)?;
8215 htlc.payment_hash.write(writer)?;
8216 htlc.source.write(writer)?;
8218 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
8220 onion_packet.write(writer)?;
8222 &OutboundHTLCState::Committed => {
8225 &OutboundHTLCState::RemoteRemoved(_) => {
8226 // Treat this as a Committed because we haven't received the CS - they'll
8227 // resend the claim/fail on reconnect, as well as (hopefully) the missing CS.
8230 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
8232 if let OutboundHTLCOutcome::Success(preimage) = outcome {
8233 preimages.push(preimage);
8235 let reason: Option<&HTLCFailReason> = outcome.into();
8236 reason.write(writer)?;
8238 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
8240 if let OutboundHTLCOutcome::Success(preimage) = outcome {
8241 preimages.push(preimage);
8243 let reason: Option<&HTLCFailReason> = outcome.into();
8244 reason.write(writer)?;
8247 pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat);
8248 pending_outbound_blinding_points.push(htlc.blinding_point);
8251 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
8252 let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
8253 // Vec of (htlc_id, failure_code, sha256_of_onion)
8254 let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new();
8255 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
8256 for update in self.context.holding_cell_htlc_updates.iter() {
8258 &HTLCUpdateAwaitingACK::AddHTLC {
8259 ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
8260 blinding_point, skimmed_fee_msat,
8263 amount_msat.write(writer)?;
8264 cltv_expiry.write(writer)?;
8265 payment_hash.write(writer)?;
8266 source.write(writer)?;
8267 onion_routing_packet.write(writer)?;
8269 holding_cell_skimmed_fees.push(skimmed_fee_msat);
8270 holding_cell_blinding_points.push(blinding_point);
8272 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
8274 payment_preimage.write(writer)?;
8275 htlc_id.write(writer)?;
8277 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
8279 htlc_id.write(writer)?;
8280 err_packet.write(writer)?;
8282 &HTLCUpdateAwaitingACK::FailMalformedHTLC {
8283 htlc_id, failure_code, sha256_of_onion
8285 // We don't want to break downgrading by adding a new variant, so write a dummy
8286 // `::FailHTLC` variant and write the real malformed error as an optional TLV.
8287 malformed_htlcs.push((htlc_id, failure_code, sha256_of_onion));
8289 let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
8291 htlc_id.write(writer)?;
8292 dummy_err_packet.write(writer)?;
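// The real (htlc_id, failure_code, sha256_of_onion) data is written in the `malformed_htlcs`
// TLV (type 43) below.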
8297 match self.context.resend_order {
8298 RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
8299 RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
8302 self.context.monitor_pending_channel_ready.write(writer)?;
8303 self.context.monitor_pending_revoke_and_ack.write(writer)?;
8304 self.context.monitor_pending_commitment_signed.write(writer)?;
8306 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
8307 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
8308 pending_forward.write(writer)?;
8309 htlc_id.write(writer)?;
8312 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
8313 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
8314 htlc_source.write(writer)?;
8315 payment_hash.write(writer)?;
8316 fail_reason.write(writer)?;
8319 if self.context.is_outbound() {
8320 self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
8321 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
8322 Some(feerate).write(writer)?;
8324 // As for inbound HTLCs, if the update was only announced and never committed in a
8325 // commitment_signed, drop it.
8326 None::<u32>.write(writer)?;
8328 self.context.holding_cell_update_fee.write(writer)?;
8330 self.context.next_holder_htlc_id.write(writer)?;
8331 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
8332 self.context.update_time_counter.write(writer)?;
8333 self.context.feerate_per_kw.write(writer)?;
8335 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
8336 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
8337 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
8338 // consider the stale state on reload.
8341 self.context.funding_tx_confirmed_in.write(writer)?;
8342 self.context.funding_tx_confirmation_height.write(writer)?;
8343 self.context.short_channel_id.write(writer)?;
8345 self.context.counterparty_dust_limit_satoshis.write(writer)?;
8346 self.context.holder_dust_limit_satoshis.write(writer)?;
8347 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
8349 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
8350 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
8352 self.context.counterparty_htlc_minimum_msat.write(writer)?;
8353 self.context.holder_htlc_minimum_msat.write(writer)?;
8354 self.context.counterparty_max_accepted_htlcs.write(writer)?;
8356 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
8357 self.context.minimum_depth.unwrap_or(0).write(writer)?;
8359 match &self.context.counterparty_forwarding_info {
8362 info.fee_base_msat.write(writer)?;
8363 info.fee_proportional_millionths.write(writer)?;
8364 info.cltv_expiry_delta.write(writer)?;
8366 None => 0u8.write(writer)?
8369 self.context.channel_transaction_parameters.write(writer)?;
8370 self.context.funding_transaction.write(writer)?;
8372 self.context.counterparty_cur_commitment_point.write(writer)?;
8373 self.context.counterparty_prev_commitment_point.write(writer)?;
8374 self.context.counterparty_node_id.write(writer)?;
8376 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
8378 self.context.commitment_secrets.write(writer)?;
8380 self.context.channel_update_status.write(writer)?;
8382 #[cfg(any(test, fuzzing))]
8383 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
8384 #[cfg(any(test, fuzzing))]
8385 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
8386 htlc.write(writer)?;
8389 // If the channel type is something other than only-static-remote-key, then we need to have
8390 // older clients fail to deserialize this channel at all. If the type is
8391 // only-static-remote-key, we simply consider it "default" and don't write the channel type at all.
8393 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
8394 Some(&self.context.channel_type) } else { None };
8396 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
8397 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
8398 // a different percentage of the channel value than 10%, which older versions of LDK used
8399 // to set it to before the percentage was made configurable.
8400 let serialized_holder_selected_reserve =
8401 if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
8402 { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
8404 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
8405 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
8406 let serialized_holder_htlc_max_in_flight =
8407 if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
8408 { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
8410 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
8411 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
8413 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
8414 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
8415 // we write the high bytes as an option here.
8416 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
8418 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
8420 write_tlv_fields!(writer, {
8421 (0, self.context.announcement_sigs, option),
8422 // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
8423 // default value instead of being Option<>al. Thus, to maintain compatibility we write
8424 // them twice, once with their original default values above, and once as an option
8425 // here. On the read side, old versions will simply ignore the odd-type entries here,
8426 // and new versions map the default values to None and allow the TLV entries here to override them.
8428 (1, self.context.minimum_depth, option),
8429 (2, chan_type, option),
8430 (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
8431 (4, serialized_holder_selected_reserve, option),
8432 (5, self.context.config, required),
8433 (6, serialized_holder_htlc_max_in_flight, option),
8434 (7, self.context.shutdown_scriptpubkey, option),
8435 (8, self.context.blocked_monitor_updates, optional_vec),
8436 (9, self.context.target_closing_feerate_sats_per_kw, option),
8437 (11, self.context.monitor_pending_finalized_fulfills, required_vec),
8438 (13, self.context.channel_creation_height, required),
8439 (15, preimages, required_vec),
8440 (17, self.context.announcement_sigs_state, required),
8441 (19, self.context.latest_inbound_scid_alias, option),
8442 (21, self.context.outbound_scid_alias, required),
8443 (23, channel_ready_event_emitted, option),
8444 (25, user_id_high_opt, option),
8445 (27, self.context.channel_keys_id, required),
8446 (28, holder_max_accepted_htlcs, option),
8447 (29, self.context.temporary_channel_id, option),
8448 (31, channel_pending_event_emitted, option),
8449 (35, pending_outbound_skimmed_fees, optional_vec),
8450 (37, holding_cell_skimmed_fees, optional_vec),
8451 (38, self.context.is_batch_funding, option),
8452 (39, pending_outbound_blinding_points, optional_vec),
8453 (41, holding_cell_blinding_points, optional_vec),
8454 (43, malformed_htlcs, optional_vec), // Added in 0.0.119
8455 (45, self.context.local_initiated_shutdown, option), // Added in 0.0.122
8462 const MAX_ALLOC_SIZE: usize = 64*1024;
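// Bounds the up-front `Vec::with_capacity` for the serialized signer bytes below, so a
// corrupted length field can't trigger a huge allocation before any data is actually read.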
8463 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
8465 ES::Target: EntropySource,
8466 SP::Target: SignerProvider
8468 fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
8469 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
8470 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
8472 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
8473 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
8474 // the low bytes now and the high bytes later.
8475 let user_id_low: u64 = Readable::read(reader)?;
8477 let mut config = Some(LegacyChannelConfig::default());
8479 // Read the old serialization of the ChannelConfig from version 0.0.98.
8480 config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
8481 config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
8482 config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
8483 config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
8485 // Read the 8 bytes of backwards-compatibility ChannelConfig data.
8486 let mut _val: u64 = Readable::read(reader)?;
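// Newer serializations carry the real `ChannelConfig` in TLV type 5 further below; when that
// TLV is absent we keep whatever `config` holds at this point.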
8489 let channel_id = Readable::read(reader)?;
8490 let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?;
8491 let channel_value_satoshis = Readable::read(reader)?;
8493 let latest_monitor_update_id = Readable::read(reader)?;
8495 let mut keys_data = None;
8497 // Read the serialized signer bytes. We'll choose to deserialize them or not based on whether
8498 // the `channel_keys_id` TLV is present below.
8499 let keys_len: u32 = Readable::read(reader)?;
8500 keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
8501 while keys_data.as_ref().unwrap().len() != keys_len as usize {
8502 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
8503 let mut data = [0; 1024];
8504 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
8505 reader.read_exact(read_slice)?;
8506 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
8510 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
8511 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
8512 Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
8515 let destination_script = Readable::read(reader)?;
8517 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
8518 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
8519 let value_to_self_msat = Readable::read(reader)?;
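// Pending HTLCs were written as a count followed by each HTLC's fixed fields and a u8 state
// discriminant; rebuild the inbound and outbound sets from that layout here.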
8521 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
8523 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
8524 for _ in 0..pending_inbound_htlc_count {
8525 pending_inbound_htlcs.push(InboundHTLCOutput {
8526 htlc_id: Readable::read(reader)?,
8527 amount_msat: Readable::read(reader)?,
8528 cltv_expiry: Readable::read(reader)?,
8529 payment_hash: Readable::read(reader)?,
8530 state: match <u8 as Readable>::read(reader)? {
8531 1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
8532 2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
8533 3 => InboundHTLCState::Committed,
8534 4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
8535 _ => return Err(DecodeError::InvalidValue),
8540 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
8541 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
8542 for _ in 0..pending_outbound_htlc_count {
8543 pending_outbound_htlcs.push(OutboundHTLCOutput {
8544 htlc_id: Readable::read(reader)?,
8545 amount_msat: Readable::read(reader)?,
8546 cltv_expiry: Readable::read(reader)?,
8547 payment_hash: Readable::read(reader)?,
8548 source: Readable::read(reader)?,
8549 state: match <u8 as Readable>::read(reader)? {
8550 0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
8551 1 => OutboundHTLCState::Committed,
8553 let option: Option<HTLCFailReason> = Readable::read(reader)?;
8554 OutboundHTLCState::RemoteRemoved(option.into())
8557 let option: Option<HTLCFailReason> = Readable::read(reader)?;
8558 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
8561 let option: Option<HTLCFailReason> = Readable::read(reader)?;
8562 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
8564 _ => return Err(DecodeError::InvalidValue),
8566 skimmed_fee_msat: None,
8567 blinding_point: None,
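// (Both fields above postdate this fixed-format section; if present they are filled in from
// the optional TLV vectors of types 35 and 39 read further below.)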
8571 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
8572 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
8573 for _ in 0..holding_cell_htlc_update_count {
8574 holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
8575 0 => HTLCUpdateAwaitingACK::AddHTLC {
8576 amount_msat: Readable::read(reader)?,
8577 cltv_expiry: Readable::read(reader)?,
8578 payment_hash: Readable::read(reader)?,
8579 source: Readable::read(reader)?,
8580 onion_routing_packet: Readable::read(reader)?,
8581 skimmed_fee_msat: None,
8582 blinding_point: None,
8584 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
8585 payment_preimage: Readable::read(reader)?,
8586 htlc_id: Readable::read(reader)?,
8588 2 => HTLCUpdateAwaitingACK::FailHTLC {
8589 htlc_id: Readable::read(reader)?,
8590 err_packet: Readable::read(reader)?,
8592 _ => return Err(DecodeError::InvalidValue),
8596 let resend_order = match <u8 as Readable>::read(reader)? {
8597 0 => RAACommitmentOrder::CommitmentFirst,
8598 1 => RAACommitmentOrder::RevokeAndACKFirst,
8599 _ => return Err(DecodeError::InvalidValue),
8602 let monitor_pending_channel_ready = Readable::read(reader)?;
8603 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
8604 let monitor_pending_commitment_signed = Readable::read(reader)?;
8606 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
8607 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
8608 for _ in 0..monitor_pending_forwards_count {
8609 monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
8612 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
8613 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
8614 for _ in 0..monitor_pending_failures_count {
8615 monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
8618 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
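// Only the pending feerate itself was ever serialized; its `FeeUpdateState` is inferred
// further below from whether we funded the channel, since only the funder sends `update_fee`.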
8620 let holding_cell_update_fee = Readable::read(reader)?;
8622 let next_holder_htlc_id = Readable::read(reader)?;
8623 let next_counterparty_htlc_id = Readable::read(reader)?;
8624 let update_time_counter = Readable::read(reader)?;
8625 let feerate_per_kw = Readable::read(reader)?;
8627 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
8628 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
8629 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
8630 // consider the stale state on reload.
8631 match <u8 as Readable>::read(reader)? {
8634 let _: u32 = Readable::read(reader)?;
8635 let _: u64 = Readable::read(reader)?;
8636 let _: Signature = Readable::read(reader)?;
8638 _ => return Err(DecodeError::InvalidValue),
8641 let funding_tx_confirmed_in = Readable::read(reader)?;
8642 let funding_tx_confirmation_height = Readable::read(reader)?;
8643 let short_channel_id = Readable::read(reader)?;
8645 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
8646 let holder_dust_limit_satoshis = Readable::read(reader)?;
8647 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
8648 let mut counterparty_selected_channel_reserve_satoshis = None;
8650 // Read the old serialization from version 0.0.98.
8651 counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
8653 // Read the 8 bytes of backwards-compatibility data.
8654 let _dummy: u64 = Readable::read(reader)?;
8656 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
8657 let holder_htlc_minimum_msat = Readable::read(reader)?;
8658 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
8660 let mut minimum_depth = None;
8662 // Read the old serialization from version 0.0.98.
8663 minimum_depth = Some(Readable::read(reader)?);
8665 // Read the 4 bytes of backwards-compatibility data.
8666 let _dummy: u32 = Readable::read(reader)?;
8669 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
8671 1 => Some(CounterpartyForwardingInfo {
8672 fee_base_msat: Readable::read(reader)?,
8673 fee_proportional_millionths: Readable::read(reader)?,
8674 cltv_expiry_delta: Readable::read(reader)?,
8676 _ => return Err(DecodeError::InvalidValue),
8679 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
8680 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
8682 let counterparty_cur_commitment_point = Readable::read(reader)?;
8684 let counterparty_prev_commitment_point = Readable::read(reader)?;
8685 let counterparty_node_id = Readable::read(reader)?;
8687 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
8688 let commitment_secrets = Readable::read(reader)?;
8690 let channel_update_status = Readable::read(reader)?;
8692 #[cfg(any(test, fuzzing))]
8693 let mut historical_inbound_htlc_fulfills = new_hash_set();
8694 #[cfg(any(test, fuzzing))]
8696 let htlc_fulfills_len: u64 = Readable::read(reader)?;
8697 for _ in 0..htlc_fulfills_len {
8698 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
8702 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
8703 Some((feerate, if channel_parameters.is_outbound_from_holder {
8704 FeeUpdateState::Outbound
8706 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
8712 let mut announcement_sigs = None;
8713 let mut target_closing_feerate_sats_per_kw = None;
8714 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
8715 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
8716 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
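// These two defaults mirror the write side above, which only emits TLV types 4 and 6 when the
// values differ from the legacy defaults; start from those defaults and let the TLVs override
// them if present.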
8717 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
8718 // only, so we default to that if none was written.
8719 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
8720 let mut channel_creation_height = Some(serialized_height);
8721 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
8723 // If we read an old Channel, for simplicity we just treat it as "we never sent an
8724 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
8725 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
8726 let mut latest_inbound_scid_alias = None;
8727 let mut outbound_scid_alias = None;
8728 let mut channel_pending_event_emitted = None;
8729 let mut channel_ready_event_emitted = None;
8731 let mut user_id_high_opt: Option<u64> = None;
8732 let mut channel_keys_id: Option<[u8; 32]> = None;
8733 let mut temporary_channel_id: Option<ChannelId> = None;
8734 let mut holder_max_accepted_htlcs: Option<u16> = None;
8736 let mut blocked_monitor_updates = Some(Vec::new());
8738 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
8739 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
8741 let mut is_batch_funding: Option<()> = None;
8743 let mut local_initiated_shutdown: Option<()> = None;
8745 let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
8746 let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
8748 let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
8750 read_tlv_fields!(reader, {
8751 (0, announcement_sigs, option),
8752 (1, minimum_depth, option),
8753 (2, channel_type, option),
8754 (3, counterparty_selected_channel_reserve_satoshis, option),
8755 (4, holder_selected_channel_reserve_satoshis, option),
8756 (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
8757 (6, holder_max_htlc_value_in_flight_msat, option),
8758 (7, shutdown_scriptpubkey, option),
8759 (8, blocked_monitor_updates, optional_vec),
8760 (9, target_closing_feerate_sats_per_kw, option),
8761 (11, monitor_pending_finalized_fulfills, optional_vec),
8762 (13, channel_creation_height, option),
8763 (15, preimages_opt, optional_vec),
8764 (17, announcement_sigs_state, option),
8765 (19, latest_inbound_scid_alias, option),
8766 (21, outbound_scid_alias, option),
8767 (23, channel_ready_event_emitted, option),
8768 (25, user_id_high_opt, option),
8769 (27, channel_keys_id, option),
8770 (28, holder_max_accepted_htlcs, option),
8771 (29, temporary_channel_id, option),
8772 (31, channel_pending_event_emitted, option),
8773 (35, pending_outbound_skimmed_fees_opt, optional_vec),
8774 (37, holding_cell_skimmed_fees_opt, optional_vec),
8775 (38, is_batch_funding, option),
8776 (39, pending_outbound_blinding_points_opt, optional_vec),
8777 (41, holding_cell_blinding_points_opt, optional_vec),
8778 (43, malformed_htlcs, optional_vec), // Added in 0.0.119
8779 (45, local_initiated_shutdown, option),
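// If the `channel_keys_id` TLV (type 27) was present we can re-derive the signer from it;
// otherwise fall back to deserializing the raw signer bytes captured earlier.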
8782 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
8783 let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
8784 // If we've gotten to the funding stage of the channel, populate the signer with its
8785 // required channel parameters.
8786 if channel_state >= ChannelState::FundingNegotiated {
8787 holder_signer.provide_channel_parameters(&channel_parameters);
8789 (channel_keys_id, holder_signer)
8791 // `keys_data` can be `None` if we had corrupted data.
8792 let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
8793 let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
8794 (holder_signer.channel_keys_id(), holder_signer)
8797 if let Some(preimages) = preimages_opt {
8798 let mut iter = preimages.into_iter();
8799 for htlc in pending_outbound_htlcs.iter_mut() {
8801 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
8802 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
8804 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
8805 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
8810 // We expect all preimages to be consumed above
8811 if iter.next().is_some() {
8812 return Err(DecodeError::InvalidValue);
8816 let chan_features = channel_type.as_ref().unwrap();
8817 if !chan_features.is_subset(our_supported_features) {
8818 // If the channel was written by a new version and negotiated with features we don't
8819 // understand yet, refuse to read it.
8820 return Err(DecodeError::UnknownRequiredFeature);
8823 // ChannelTransactionParameters may have had an empty features set upon deserialization.
8824 // To account for that, we're proactively setting/overriding the field here.
8825 channel_parameters.channel_type_features = chan_features.clone();
8827 let mut secp_ctx = Secp256k1::new();
8828 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
8830 // `user_id` used to be a single u64 value. In order to remain backwards
8831 // compatible with versions prior to 0.0.113, the u128 is serialized as two
8832 // separate u64 values.
8833 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
8835 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
8837 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
8838 let mut iter = skimmed_fees.into_iter();
8839 for htlc in pending_outbound_htlcs.iter_mut() {
8840 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8842 // We expect all skimmed fees to be consumed above
8843 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8845 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
8846 let mut iter = skimmed_fees.into_iter();
8847 for htlc in holding_cell_htlc_updates.iter_mut() {
8848 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
8849 *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8852 // We expect all skimmed fees to be consumed above
8853 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8855 if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
8856 let mut iter = blinding_pts.into_iter();
8857 for htlc in pending_outbound_htlcs.iter_mut() {
8858 htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8860 // We expect all blinding points to be consumed above
8861 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8863 if let Some(blinding_pts) = holding_cell_blinding_points_opt {
8864 let mut iter = blinding_pts.into_iter();
8865 for htlc in holding_cell_htlc_updates.iter_mut() {
8866 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
8867 *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8870 // We expect all blinding points to be consumed above
8871 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8874 if let Some(malformed_htlcs) = malformed_htlcs {
8875 for (malformed_htlc_id, failure_code, sha256_of_onion) in malformed_htlcs {
8876 let htlc_idx = holding_cell_htlc_updates.iter().position(|htlc| {
8877 if let HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet } = htlc {
8878 let matches = *htlc_id == malformed_htlc_id;
8879 if matches { debug_assert!(err_packet.data.is_empty()) }
8882 }).ok_or(DecodeError::InvalidValue)?;
8883 let malformed_htlc = HTLCUpdateAwaitingACK::FailMalformedHTLC {
8884 htlc_id: malformed_htlc_id, failure_code, sha256_of_onion
8886 let _ = core::mem::replace(&mut holding_cell_htlc_updates[htlc_idx], malformed_htlc);
8891 context: ChannelContext {
8894 config: config.unwrap(),
8898 // Note that we don't care about serializing handshake limits as we only ever serialize
8899 // channel data after the handshake has completed.
8900 inbound_handshake_limits_override: None,
8903 temporary_channel_id,
8905 announcement_sigs_state: announcement_sigs_state.unwrap(),
8907 channel_value_satoshis,
8909 latest_monitor_update_id,
8911 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
8912 shutdown_scriptpubkey,
8915 cur_holder_commitment_transaction_number,
8916 cur_counterparty_commitment_transaction_number,
8919 holder_max_accepted_htlcs,
8920 pending_inbound_htlcs,
8921 pending_outbound_htlcs,
8922 holding_cell_htlc_updates,
8926 monitor_pending_channel_ready,
8927 monitor_pending_revoke_and_ack,
8928 monitor_pending_commitment_signed,
8929 monitor_pending_forwards,
8930 monitor_pending_failures,
8931 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
8933 signer_pending_commitment_update: false,
8934 signer_pending_funding: false,
8937 holding_cell_update_fee,
8938 next_holder_htlc_id,
8939 next_counterparty_htlc_id,
8940 update_time_counter,
8943 #[cfg(debug_assertions)]
8944 holder_max_commitment_tx_output: Mutex::new((0, 0)),
8945 #[cfg(debug_assertions)]
8946 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
8948 last_sent_closing_fee: None,
8949 pending_counterparty_closing_signed: None,
8950 expecting_peer_commitment_signed: false,
8951 closing_fee_limits: None,
8952 target_closing_feerate_sats_per_kw,
8954 funding_tx_confirmed_in,
8955 funding_tx_confirmation_height,
8957 channel_creation_height: channel_creation_height.unwrap(),
8959 counterparty_dust_limit_satoshis,
8960 holder_dust_limit_satoshis,
8961 counterparty_max_htlc_value_in_flight_msat,
8962 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
8963 counterparty_selected_channel_reserve_satoshis,
8964 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
8965 counterparty_htlc_minimum_msat,
8966 holder_htlc_minimum_msat,
8967 counterparty_max_accepted_htlcs,
8970 counterparty_forwarding_info,
8972 channel_transaction_parameters: channel_parameters,
8973 funding_transaction,
8976 counterparty_cur_commitment_point,
8977 counterparty_prev_commitment_point,
8978 counterparty_node_id,
8980 counterparty_shutdown_scriptpubkey,
8984 channel_update_status,
8985 closing_signed_in_flight: false,
8989 #[cfg(any(test, fuzzing))]
8990 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
8991 #[cfg(any(test, fuzzing))]
8992 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
8994 workaround_lnd_bug_4006: None,
8995 sent_message_awaiting_response: None,
8997 latest_inbound_scid_alias,
8998 // Later in the ChannelManager deserialization phase we scan for channels and assign an scid alias if it's missing
8999 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
9001 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
9002 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
9004 #[cfg(any(test, fuzzing))]
9005 historical_inbound_htlc_fulfills,
9007 channel_type: channel_type.unwrap(),
9010 local_initiated_shutdown,
9012 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
9014 #[cfg(dual_funding)]
9015 dual_funding_channel_context: None,
9023 use bitcoin::blockdata::constants::ChainHash;
9024 use bitcoin::blockdata::script::{ScriptBuf, Builder};
9025 use bitcoin::blockdata::transaction::{Transaction, TxOut};
9026 use bitcoin::blockdata::opcodes;
9027 use bitcoin::network::constants::Network;
9028 use crate::ln::onion_utils::INVALID_ONION_BLINDING;
9029 use crate::ln::{PaymentHash, PaymentPreimage};
9030 use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
9031 use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
9032 use crate::ln::channel::InitFeatures;
9033 use crate::ln::channel::{AwaitingChannelReadyFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
9034 use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
9035 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
9036 use crate::ln::msgs;
9037 use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
9038 use crate::ln::script::ShutdownScript;
9039 use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
9040 use crate::chain::BestBlock;
9041 use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
9042 use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
9043 use crate::chain::transaction::OutPoint;
9044 use crate::routing::router::{Path, RouteHop};
9045 use crate::util::config::UserConfig;
9046 use crate::util::errors::APIError;
9047 use crate::util::ser::{ReadableArgs, Writeable};
9048 use crate::util::test_utils;
9049 use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
9050 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
9051 use bitcoin::secp256k1::ffi::Signature as FFISignature;
9052 use bitcoin::secp256k1::{SecretKey,PublicKey};
9053 use bitcoin::hashes::sha256::Hash as Sha256;
9054 use bitcoin::hashes::Hash;
9055 use bitcoin::hashes::hex::FromHex;
9056 use bitcoin::hash_types::WPubkeyHash;
9057 use bitcoin::blockdata::locktime::absolute::LockTime;
9058 use bitcoin::address::{WitnessProgram, WitnessVersion};
9059 use crate::prelude::*;
9062 fn test_channel_state_order() {
9063 use crate::ln::channel::NegotiatingFundingFlags;
9064 use crate::ln::channel::AwaitingChannelReadyFlags;
9065 use crate::ln::channel::ChannelReadyFlags;
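// `ChannelState`'s ordering is relied upon elsewhere (e.g. the `channel_state >=
// FundingNegotiated` check during deserialization), so pin the lifecycle ordering here.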
9067 assert!(ChannelState::NegotiatingFunding(NegotiatingFundingFlags::new()) < ChannelState::FundingNegotiated);
9068 assert!(ChannelState::FundingNegotiated < ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()));
9069 assert!(ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()) < ChannelState::ChannelReady(ChannelReadyFlags::new()));
9070 assert!(ChannelState::ChannelReady(ChannelReadyFlags::new()) < ChannelState::ShutdownComplete);
9073 struct TestFeeEstimator {
9076 impl FeeEstimator for TestFeeEstimator {
9077 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
9083 fn test_max_funding_satoshis_no_wumbo() {
9084 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
9085 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
9086 "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
9090 signer: InMemorySigner,
9093 impl EntropySource for Keys {
9094 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
9097 impl SignerProvider for Keys {
9098 type EcdsaSigner = InMemorySigner;
9100 type TaprootSigner = InMemorySigner;
9102 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
9103 self.signer.channel_keys_id()
9106 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
9110 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
9112 fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
9113 let secp_ctx = Secp256k1::signing_only();
9114 let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
9115 let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
9116 Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
9119 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
9120 let secp_ctx = Secp256k1::signing_only();
9121 let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
9122 Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
9126 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
9127 fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
9128 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
9132 fn upfront_shutdown_script_incompatibility() {
9133 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
9134 let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
9135 &WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
9138 let seed = [42; 32];
9139 let network = Network::Testnet;
9140 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9141 keys_provider.expect(OnGetShutdownScriptpubkey {
9142 returns: non_v0_segwit_shutdown_script.clone(),
9145 let secp_ctx = Secp256k1::new();
9146 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9147 let config = UserConfig::default();
9148 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
9149 Err(APIError::IncompatibleShutdownScript { script }) => {
9150 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
9152 Err(e) => panic!("Unexpected error: {:?}", e),
9153 Ok(_) => panic!("Expected error"),
9157 // Check that, during channel creation, we use the same feerate in the open channel message
9158 // as we do in the Channel object creation itself.
9160 fn test_open_channel_msg_fee() {
9161 let original_fee = 253;
9162 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
9163 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
9164 let secp_ctx = Secp256k1::new();
9165 let seed = [42; 32];
9166 let network = Network::Testnet;
9167 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9169 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9170 let config = UserConfig::default();
9171 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9173 // Now change the fee so we can check that the fee in the open_channel message is the
9174 // same as the old fee.
9175 fee_est.fee_est = 500;
9176 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9177 assert_eq!(open_channel_msg.common_fields.commitment_feerate_sat_per_1000_weight, original_fee);
9181 fn test_holder_vs_counterparty_dust_limit() {
9182 // Test that when calculating the local and remote commitment transaction fees, the correct
9183 // dust limits are used.
9184 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9185 let secp_ctx = Secp256k1::new();
9186 let seed = [42; 32];
9187 let network = Network::Testnet;
9188 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9189 let logger = test_utils::TestLogger::new();
9190 let best_block = BestBlock::from_network(network);
9192 // Go through the flow of opening a channel between two nodes, making sure
9193 // they have different dust limits.
9195 // Create Node A's channel pointing to Node B's pubkey
9196 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9197 let config = UserConfig::default();
9198 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9200 // Create Node B's channel by receiving Node A's open_channel message
9201 // Make sure A's dust limit is as we expect.
9202 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9203 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9204 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
9206 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
9207 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
9208 accept_channel_msg.common_fields.dust_limit_satoshis = 546;
9209 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
9210 node_a_chan.context.holder_dust_limit_satoshis = 1560;
9212 // Node A --> Node B: funding created
9213 let output_script = node_a_chan.context.get_funding_redeemscript();
9214 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
9215 value: 10000000, script_pubkey: output_script.clone(),
9217 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9218 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
9219 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
9221 // Node B --> Node A: funding signed
9222 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
9223 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
9225 // Put some inbound and outbound HTLCs in A's channel.
9226 let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
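// Roughly: at the 15000 sat/kW feerate above, A's effective dust threshold for these HTLCs is
// about 1560 sat plus ~10k sat of second-stage tx fee, which 11_092 sat stays under, while B's
// 546 sat limit plus the same fee comes in under 11_092 sat, so the HTLCs are non-dust there.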
9227 node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
9229 amount_msat: htlc_amount_msat,
9230 payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
9231 cltv_expiry: 300000000,
9232 state: InboundHTLCState::Committed,
9235 node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
9237 amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
9238 payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
9239 cltv_expiry: 200000000,
9240 state: OutboundHTLCState::Committed,
9241 source: HTLCSource::OutboundRoute {
9242 path: Path { hops: Vec::new(), blinded_tail: None },
9243 session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
9244 first_hop_htlc_msat: 548,
9245 payment_id: PaymentId([42; 32]),
9247 skimmed_fee_msat: None,
9248 blinding_point: None,
9251 // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
9252 // the dust limit check.
9253 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
9254 let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
9255 let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
9256 assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
9258 // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
9259 // of the HTLCs are seen to be above the dust limit.
9260 node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
9261 let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
9262 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
9263 let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
9264 assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
9268 fn test_timeout_vs_success_htlc_dust_limit() {
9269 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
9270 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
9271 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
9272 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
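// Roughly: an offered (timeout-path) HTLC is dust when its value minus the HTLC-timeout
// transaction fee falls below the dust limit, while a received (success-path) HTLC uses the
// HTLC-success transaction fee instead, and the two weights differ (663 vs 703 weight units
// for non-anchor channels per BOLT 3), which is what the amounts below exercise.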
9273 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
9274 let secp_ctx = Secp256k1::new();
9275 let seed = [42; 32];
9276 let network = Network::Testnet;
9277 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9279 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9280 let config = UserConfig::default();
9281 let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9283 let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
9284 let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
9286 // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
9287 // counted as dust when it shouldn't be.
9288 let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
9289 let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
9290 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
9291 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
9293 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
9294 let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
9295 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
9296 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
9297 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
9299 chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
9301 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
9302 let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
9303 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
9304 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
9305 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
9307 // If swapped: this HTLC would be counted as dust when it shouldn't be.
9308 let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
9309 let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
9310 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
9311 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
9315 fn channel_reestablish_no_updates() {
9316 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9317 let logger = test_utils::TestLogger::new();
9318 let secp_ctx = Secp256k1::new();
9319 let seed = [42; 32];
9320 let network = Network::Testnet;
9321 let best_block = BestBlock::from_network(network);
9322 let chain_hash = ChainHash::using_genesis_block(network);
9323 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9325 // Go through the flow of opening a channel between two nodes.
9327 // Create Node A's channel pointing to Node B's pubkey
9328 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9329 let config = UserConfig::default();
9330 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9332 // Create Node B's channel by receiving Node A's open_channel message
9333 let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
9334 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9335 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
9337 // Node B --> Node A: accept channel
9338 let accept_channel_msg = node_b_chan.accept_inbound_channel();
9339 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
9341 // Node A --> Node B: funding created
9342 let output_script = node_a_chan.context.get_funding_redeemscript();
9343 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
9344 value: 10000000, script_pubkey: output_script.clone(),
9346 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9347 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
9348 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
9350 // Node B --> Node A: funding signed
9351 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
9352 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
9354 // Now disconnect the two nodes and check that the commitment point in
9355 // Node B's channel_reestablish message is sane.
9356 assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
9357 let msg = node_b_chan.get_channel_reestablish(&&logger);
9358 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
9359 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
9360 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
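// A freshly funded channel has only exchanged the initial commitment transaction, so the next
// commitment number is 1, no revocations have been received yet (next revocation number 0),
// and the last-seen per-commitment secret is still all zeroes.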
9362 // Check that the commitment point in Node A's channel_reestablish message is sane.
9364 assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
9365 let msg = node_a_chan.get_channel_reestablish(&&logger);
9366 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
9367 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
9368 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
9372 fn test_configured_holder_max_htlc_value_in_flight() {
9373 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9374 let logger = test_utils::TestLogger::new();
9375 let secp_ctx = Secp256k1::new();
9376 let seed = [42; 32];
9377 let network = Network::Testnet;
9378 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9379 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9380 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9382 let mut config_2_percent = UserConfig::default();
9383 config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
9384 let mut config_99_percent = UserConfig::default();
9385 config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
9386 let mut config_0_percent = UserConfig::default();
9387 config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
9388 let mut config_101_percent = UserConfig::default();
9389 config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
9391 // Test that `OutboundV1Channel::new` creates a channel with the correct value for
9392 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
9393 // which is set to the lower bound + 1 (2%) of the `channel_value`.
9394 let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
9395 let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
9396 assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
9398 // Test with the upper bound - 1 of valid values (99%).
9399 let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
9400 let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
9401 assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
9403 let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
9405 // Test that `InboundV1Channel::new` creates a channel with the correct value for
9406 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
9407 // which is set to the lower bound + 1 (2%) of the `channel_value`.
9408 let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9409 let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
9410 assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
9412 // Test with the upper bound - 1 of valid values (99%).
9413 let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9414 let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
9415 assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
9417 // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
9418 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
9419 let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
9420 let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
9421 assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
9423 // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
9424 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value larger than 100.
9426 let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
9427 let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
9428 assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
9430 // Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
9431 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
9432 let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9433 let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
9434 assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
9436 // Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
9437 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value larger than 100.
9439 let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9440 let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
9441 assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
9445 fn test_configured_holder_selected_channel_reserve_satoshis() {
9447 // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
9448 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
9449 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
9451 // Test with valid but unreasonably high channel reserves
9452 // Requesting and accepting parties have requested 49%-49% and 60%-30% channel reserves
9453 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
9454 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
9456 // Test with calculated channel reserve less than lower bound
9457 // i.e. `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
9458 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
9460 // Test with invalid channel reserves since the sum of both is greater than or equal to the channel value
9462 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
9463 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
9466 fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
9467 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
9468 let logger = test_utils::TestLogger::new();
9469 let secp_ctx = Secp256k1::new();
9470 let seed = [42; 32];
9471 let network = Network::Testnet;
9472 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9473 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9474 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9477 let mut outbound_node_config = UserConfig::default();
9478 outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
9479 let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();
9481 let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
9482 assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
9484 let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
9485 let mut inbound_node_config = UserConfig::default();
9486 inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
9488 if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
9489 let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();
9491 let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
9493 assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
9494 assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
9496 // Channel Negotiations failed
9497 let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
9498 assert!(result.is_err());
9503 fn channel_update() {
9504 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9505 let logger = test_utils::TestLogger::new();
9506 let secp_ctx = Secp256k1::new();
9507 let seed = [42; 32];
9508 let network = Network::Testnet;
9509 let best_block = BestBlock::from_network(network);
9510 let chain_hash = ChainHash::using_genesis_block(network);
9511 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9513 // Create Node A's channel pointing to Node B's pubkey
9514 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9515 let config = UserConfig::default();
9516 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9518 // Create Node B's channel by receiving Node A's open_channel message
9519 // Make sure A's dust limit is as we expect.
9520 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9521 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9522 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
9524 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
9525 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
9526 accept_channel_msg.common_fields.dust_limit_satoshis = 546;
9527 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
9528 node_a_chan.context.holder_dust_limit_satoshis = 1560;
9530 // Node A --> Node B: funding created
9531 let output_script = node_a_chan.context.get_funding_redeemscript();
9532 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
9533 value: 10000000, script_pubkey: output_script.clone(),
9535 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9536 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
9537 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
9539 // Node B --> Node A: funding signed
9540 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
9541 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
9543 // Make sure that receiving a channel update will update the Channel as expected.
9544 let update = ChannelUpdate {
9545 contents: UnsignedChannelUpdate {
9547 short_channel_id: 0,
9550 cltv_expiry_delta: 100,
9551 htlc_minimum_msat: 5,
9552 htlc_maximum_msat: MAX_VALUE_MSAT,
9554 fee_proportional_millionths: 11,
9555 excess_data: Vec::new(),
9557 signature: Signature::from(unsafe { FFISignature::new() })
9559 assert!(node_a_chan.channel_update(&update).unwrap());
9561 // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
9562 // change our official htlc_minimum_msat.
9563 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
9564 match node_a_chan.context.counterparty_forwarding_info() {
9566 assert_eq!(info.cltv_expiry_delta, 100);
9567 assert_eq!(info.fee_base_msat, 110);
9568 assert_eq!(info.fee_proportional_millionths, 11);
9570 None => panic!("expected counterparty forwarding info to be Some")
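// Applying the identical update a second time should report that nothing changed.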
9573 assert!(!node_a_chan.channel_update(&update).unwrap());
9577 fn blinding_point_skimmed_fee_malformed_ser() {
9578 // Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized properly.
9580 let logger = test_utils::TestLogger::new();
9581 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9582 let secp_ctx = Secp256k1::new();
9583 let seed = [42; 32];
9584 let network = Network::Testnet;
9585 let best_block = BestBlock::from_network(network);
9586 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9588 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9589 let config = UserConfig::default();
9590 let features = channelmanager::provided_init_features(&config);
9591 let mut outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(
9592 &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None
9594 let inbound_chan = InboundV1Channel::<&TestKeysInterface>::new(
9595 &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config),
9596 &features, &outbound_chan.get_open_channel(ChainHash::using_genesis_block(network)), 7, &config, 0, &&logger, false
9598 outbound_chan.accept_channel(&inbound_chan.get_accept_channel_message(), &config.channel_handshake_limits, &features).unwrap();
9599 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
9600 value: 10000000, script_pubkey: outbound_chan.context.get_funding_redeemscript(),
9602 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9603 let funding_created = outbound_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap().unwrap();
9604 let mut chan = match inbound_chan.funding_created(&funding_created, best_block, &&keys_provider, &&logger) {
9605 Ok((chan, _, _)) => chan,
9606 Err((_, e)) => panic!("{}", e),
9609 let dummy_htlc_source = HTLCSource::OutboundRoute {
9611 hops: vec![RouteHop {
9612 pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
9613 node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
9614 cltv_expiry_delta: 0, maybe_announced_channel: false,
9618 session_priv: test_utils::privkey(42),
9619 first_hop_htlc_msat: 0,
9620 payment_id: PaymentId([42; 32]),
9622 let dummy_outbound_output = OutboundHTLCOutput {
9625 payment_hash: PaymentHash([43; 32]),
9627 state: OutboundHTLCState::Committed,
9628 source: dummy_htlc_source.clone(),
9629 skimmed_fee_msat: None,
9630 blinding_point: None,
9632 let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
9633 for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
9635 htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
9638 htlc.skimmed_fee_msat = Some(1);
9641 chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();
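// Also populate the holding cell with a mix of add, claim, fail, and fail-malformed updates so every HTLCUpdateAwaitingACK variant is exercised by the round-trip below.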
9643 let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
9646 payment_hash: PaymentHash([43; 32]),
9647 source: dummy_htlc_source.clone(),
9648 onion_routing_packet: msgs::OnionPacket {
9650 public_key: Ok(test_utils::pubkey(1)),
9651 hop_data: [0; 20*65],
9654 skimmed_fee_msat: None,
9655 blinding_point: None,
9657 let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
9658 payment_preimage: PaymentPreimage([42; 32]),
9661 let dummy_holding_cell_failed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailHTLC {
9662 htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }
9664 let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC {
9665 htlc_id, failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32],
9667 let mut holding_cell_htlc_updates = Vec::with_capacity(12);
9670 holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
9671 } else if i % 5 == 1 {
9672 holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
9673 } else if i % 5 == 2 {
9674 let mut dummy_add = dummy_holding_cell_add_htlc.clone();
9675 if let HTLCUpdateAwaitingACK::AddHTLC {
9676 ref mut blinding_point, ref mut skimmed_fee_msat, ..
9677 } = &mut dummy_add {
9678 *blinding_point = Some(test_utils::pubkey(42 + i));
9679 *skimmed_fee_msat = Some(42);
9681 holding_cell_htlc_updates.push(dummy_add);
9682 } else if i % 5 == 3 {
9683 holding_cell_htlc_updates.push(dummy_holding_cell_malformed_htlc(i as u64));
9685 holding_cell_htlc_updates.push(dummy_holding_cell_failed_htlc(i as u64));
9688 chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();
9690 // Encode and decode the channel and ensure that the HTLCs within are the same.
9691 let encoded_chan = chan.encode();
9692 let mut s = crate::io::Cursor::new(&encoded_chan);
9693 let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
9694 let features = channelmanager::provided_channel_type_features(&config);
9695 let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
9696 assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
9697 assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
9700 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
9702 fn outbound_commitment_test() {
9703 use bitcoin::sighash;
9704 use bitcoin::consensus::encode::serialize;
9705 use bitcoin::sighash::EcdsaSighashType;
9706 use bitcoin::hashes::hex::FromHex;
9707 use bitcoin::hash_types::Txid;
9708 use bitcoin::secp256k1::Message;
9709 use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
9710 use crate::ln::PaymentPreimage;
9711 use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
9712 use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
9713 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
9714 use crate::util::logger::Logger;
9715 use crate::sync::Arc;
9716 use core::str::FromStr;
9717 use hex::DisplayHex;
9719 // Test vectors from BOLT 3 Appendices C and F (anchors):
9720 let feeest = TestFeeEstimator{fee_est: 15000};
9721 let logger : Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());
9722 let secp_ctx = Secp256k1::new();
9724 let mut signer = InMemorySigner::new(
9726 SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
9727 SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
9728 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
9729 SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
9730 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
9732 // These aren't set in the test vectors:
9733 [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
9739 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
9740 <Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
9741 let keys_provider = Keys { signer: signer.clone() };
9743 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9744 let mut config = UserConfig::default();
9745 config.channel_handshake_config.announced_channel = false;
9746 let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
9747 chan.context.holder_dust_limit_satoshis = 546;
9748 chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Normally filled in during accept_channel
9750 let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
9752 let counterparty_pubkeys = ChannelPublicKeys {
9753 funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
9754 revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
9755 payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
9756 delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
9757 htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
9759 chan.context.channel_transaction_parameters.counterparty_parameters = Some(
9760 CounterpartyChannelTransactionParameters {
9761 pubkeys: counterparty_pubkeys.clone(),
9762 selected_contest_delay: 144
9764 chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
9765 signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
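// Sanity-check a few of the counterparty public keys against the values given in the BOLT 3 test vectors.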
9767 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
9768 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
9770 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
9771 <Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
9773 assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
9774 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
9776 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
9777 // derived from a commitment_seed, so instead we copy it here and call
9778 // build_commitment_transaction.
9779 let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
9780 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
9781 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
9782 let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
9783 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
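// The macros below drive each BOLT 3 vector: test_commitment! and test_commitment_with_anchors! select the channel type, and test_commitment_common! builds the commitment transaction, verifies the counterparty and holder signatures, compares the serialized transaction against the expected hex, and then repeats the signature and serialization checks for every untrimmed HTLC transaction.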
9785 macro_rules! test_commitment {
9786 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
9787 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9788 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
9792 macro_rules! test_commitment_with_anchors {
9793 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
9794 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9795 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
9799 macro_rules! test_commitment_common {
9800 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
9801 $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
9803 let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
9804 let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
9806 let htlcs = commitment_stats.htlcs_included.drain(..)
9807 .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
9809 (commitment_stats.tx, htlcs)
9811 let trusted_tx = commitment_tx.trust();
9812 let unsigned_tx = trusted_tx.built_transaction();
9813 let redeemscript = chan.context.get_funding_redeemscript();
9814 let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
9815 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
9816 log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
9817 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
9819 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
9820 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
9821 let mut counterparty_htlc_sigs = Vec::new();
9822 counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
9824 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
9825 per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
9826 counterparty_htlc_sigs.push(remote_signature);
9828 assert_eq!(htlcs.len(), per_htlc.len());
9830 let holder_commitment_tx = HolderCommitmentTransaction::new(
9831 commitment_tx.clone(),
9832 counterparty_signature,
9833 counterparty_htlc_sigs,
9834 &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
9835 chan.context.counterparty_funding_pubkey()
9837 let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
9838 assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
9840 let funding_redeemscript = chan.context.get_funding_redeemscript();
9841 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
9842 assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");
9844 // ((htlc, counterparty_sig), (index, holder_sig))
9845 let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();
9848 log_trace!(logger, "verifying htlc {}", $htlc_idx);
9849 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
9851 let ref htlc = htlcs[$htlc_idx];
9852 let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
9853 chan.context.get_counterparty_selected_contest_delay().unwrap(),
9854 &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
9855 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
9856 let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
9857 let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
9858 assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");
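// The test payment hashes are SHA256([i; 32]) for small i, so a matching preimage can be found by brute force where one is needed.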
9860 let mut preimage: Option<PaymentPreimage> = None;
9863 let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
9864 if out == htlc.payment_hash {
9865 preimage = Some(PaymentPreimage([i; 32]));
9869 assert!(preimage.is_some());
9872 let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
9873 let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
9874 channel_derivation_parameters: ChannelDerivationParameters {
9875 value_satoshis: chan.context.channel_value_satoshis,
9876 keys_id: chan.context.channel_keys_id,
9877 transaction_parameters: chan.context.channel_transaction_parameters.clone(),
9879 commitment_txid: trusted_tx.txid(),
9880 per_commitment_number: trusted_tx.commitment_number(),
9881 per_commitment_point: trusted_tx.per_commitment_point(),
9882 feerate_per_kw: trusted_tx.feerate_per_kw(),
9884 preimage: preimage.clone(),
9885 counterparty_sig: *htlc_counterparty_sig,
9886 }, &secp_ctx).unwrap();
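// Anchor-outputs commitments place two anchor outputs ahead of the HTLCs, shifting each HTLC's output index by two.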
9887 let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
9888 assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
9890 let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
9891 assert_eq!(signature, htlc_holder_sig, "htlc sig");
9892 let trusted_tx = holder_commitment_tx.trust();
9893 htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
9894 log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
9895 assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
9897 assert!(htlc_counterparty_sig_iter.next().is_none());
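// Each vector below supplies, in order: the counterparty commitment signature, the holder commitment signature, the expected serialized commitment transaction, and one { index, counterparty HTLC sig, holder HTLC sig, HTLC tx hex } tuple per untrimmed HTLC.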
9901 // anchors: simple commitment tx with no HTLCs and single anchor
9902 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
9903 "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
9904 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9906 // simple commitment tx with no HTLCs
9907 chan.context.value_to_self_msat = 7000000000;
9909 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
9910 "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
9911 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9913 // anchors: simple commitment tx with no HTLCs
9914 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
9915 "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
9916 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9918 chan.context.pending_inbound_htlcs.push({
9919 let mut out = InboundHTLCOutput{
9921 amount_msat: 1000000,
9923 payment_hash: PaymentHash([0; 32]),
9924 state: InboundHTLCState::Committed,
9926 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
9929 chan.context.pending_inbound_htlcs.push({
9930 let mut out = InboundHTLCOutput{
9932 amount_msat: 2000000,
9934 payment_hash: PaymentHash([0; 32]),
9935 state: InboundHTLCState::Committed,
9937 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
9940 chan.context.pending_outbound_htlcs.push({
9941 let mut out = OutboundHTLCOutput{
9943 amount_msat: 2000000,
9945 payment_hash: PaymentHash([0; 32]),
9946 state: OutboundHTLCState::Committed,
9947 source: HTLCSource::dummy(),
9948 skimmed_fee_msat: None,
9949 blinding_point: None,
9951 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
9954 chan.context.pending_outbound_htlcs.push({
9955 let mut out = OutboundHTLCOutput{
9957 amount_msat: 3000000,
9959 payment_hash: PaymentHash([0; 32]),
9960 state: OutboundHTLCState::Committed,
9961 source: HTLCSource::dummy(),
9962 skimmed_fee_msat: None,
9963 blinding_point: None,
9965 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
9968 chan.context.pending_inbound_htlcs.push({
9969 let mut out = InboundHTLCOutput{
9971 amount_msat: 4000000,
9973 payment_hash: PaymentHash([0; 32]),
9974 state: InboundHTLCState::Committed,
9976 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
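// The channel now carries the five HTLCs used by the vectors below: three inbound (1,000,000, 2,000,000 and 4,000,000 msat) and two outbound (2,000,000 and 3,000,000 msat); the vectors vary only the feerate, dust limit and to_self balance against this state.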
9980 // commitment tx with all five HTLCs untrimmed (minimum feerate)
9981 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9982 chan.context.feerate_per_kw = 0;
9984 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
9985 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
9986 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9989 "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
9990 "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
9991 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
9994 "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
9995 "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
9996 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9999 "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
10000 "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
10001 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
10004 "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
10005 "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
10006 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10009 "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
10010 "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
10011 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10014 // commitment tx with seven outputs untrimmed (maximum feerate)
10015 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10016 chan.context.feerate_per_kw = 647;
10018 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
10019 "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
10020 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10023 "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
10024 "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
10025 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
10028 "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
10029 "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
10030 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10033 "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
10034 "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
10035 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
10038 "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
10039 "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
10040 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10043 "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
10044 "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
10045 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10048 // commitment tx with six outputs untrimmed (minimum feerate)
10049 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10050 chan.context.feerate_per_kw = 648;
10052 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
10053 "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
10054 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10057 "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
10058 "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
10059 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10062 "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
10063 "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
10064 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
10067 "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
10068 "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
10069 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10072 "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
10073 "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
10074 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10077 // anchors: commitment tx with six outputs untrimmed (minimum dust limit)
10078 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10079 chan.context.feerate_per_kw = 645;
10080 chan.context.holder_dust_limit_satoshis = 1001;
10082 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
10083 "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
10084 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10087 "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
10088 "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
10089 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
10092 "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
10093 "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
10094 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
10097 "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
10098 "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
10099 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
10102 "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
10103 "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
10104 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
10107 // commitment tx with six outputs untrimmed (maximum feerate)
10108 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10109 chan.context.feerate_per_kw = 2069;
10110 chan.context.holder_dust_limit_satoshis = 546;
10112 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
10113 "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
10114 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10117 "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
10118 "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
10119 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10122 "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
10123 "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
10124 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
10127 "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
10128 "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
10129 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10132 "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
10133 "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
10134 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10137 // commitment tx with five outputs untrimmed (minimum feerate)
10138 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10139 chan.context.feerate_per_kw = 2070;
10141 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
10142 "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
10143 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10146 "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
10147 "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
10148 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10151 "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
10152 "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
10153 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10156 "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
10157 "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
10158 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10161 // commitment tx with five outputs untrimmed (maximum feerate)
10162 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10163 chan.context.feerate_per_kw = 2194;
10165 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
10166 "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
10167 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10170 "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
10171 "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
10172 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10175 "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
10176 "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
10177 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10180 "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
10181 "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
10182 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10185 // commitment tx with four outputs untrimmed (minimum feerate)
10186 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10187 chan.context.feerate_per_kw = 2195;
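// A back-of-the-envelope check on why 2195 sat/kW is the boundary here (informal reading of
// the vectors, not spec text): without anchors, an offered HTLC is trimmed when its amount
// minus the HTLC-timeout fee falls below the dust limit, i.e. when
// amount_sat < dust_limit + floor(663 * feerate_per_kw / 1000). With dust_limit = 546 sat,
// the 2,000 sat offered HTLC sees a threshold of 546 + floor(663 * 2194 / 1000) = 2,000
// (kept, the five-output case above) versus 546 + floor(663 * 2195 / 1000) = 2,001
// (trimmed, the four-output case below).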
10189 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
10190 "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
10191 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10194 "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
10195 "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
10196 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10199 "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
10200 "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
10201 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10204 // anchors: commitment tx with four outputs untrimmed (minimum dust limit)
10205 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10206 chan.context.feerate_per_kw = 2185;
10207 chan.context.holder_dust_limit_satoshis = 2001;
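// With `anchors_zero_fee_htlc_tx` the second-stage HTLC transactions carry no fee, so an HTLC
// is trimmed simply when its amount is below the dust limit. Informally, 2,001 sat is the
// smallest dust limit that trims the 1,000 sat and both 2,000 sat HTLCs, leaving two HTLC
// outputs plus to_local/to_remote (the two anchor outputs are not counted in the
// "four outputs" name).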
10208 let cached_channel_type = chan.context.channel_type;
10209 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10211 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
10212 "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
10213 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10216 "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
10217 "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
10218 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
10221 "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
10222 "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
10223 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
10226 // commitment tx with four outputs untrimmed (maximum feerate)
10227 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10228 chan.context.feerate_per_kw = 3702;
10229 chan.context.holder_dust_limit_satoshis = 546;
10230 chan.context.channel_type = cached_channel_type.clone();
10232 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
10233 "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
10234 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10237 "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
10238 "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
10239 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10242 "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
10243 "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
10244 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10247 // commitment tx with three outputs untrimmed (minimum feerate)
10248 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10249 chan.context.feerate_per_kw = 3703;
10251 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
10252 "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
10253 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10256 "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
10257 "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
10258 "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10261 // anchors: commitment tx with three outputs untrimmed (minimum dust limit)
10262 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10263 chan.context.feerate_per_kw = 3687;
10264 chan.context.holder_dust_limit_satoshis = 3001;
10265 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10267 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
10268 "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
10269 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10272 "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
10273 "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
10274 "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
10277 // commitment tx with three outputs untrimmed (maximum feerate)
10278 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10279 chan.context.feerate_per_kw = 4914;
10280 chan.context.holder_dust_limit_satoshis = 546;
10281 chan.context.channel_type = cached_channel_type.clone();
10283 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
10284 "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
10285 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10288 "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
10289 "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
10290 "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10293 // commitment tx with two outputs untrimmed (minimum feerate)
10294 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10295 chan.context.feerate_per_kw = 4915;
10296 chan.context.holder_dust_limit_satoshis = 546;
10298 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
10299 "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
10300 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10302 // anchors: commitment tx with two outputs untrimmed (minimum dust limit)
10303 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10304 chan.context.feerate_per_kw = 4894;
10305 chan.context.holder_dust_limit_satoshis = 4001;
10306 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10308 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
10309 "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
10310 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10312 // commitment tx with two outputs untrimmed (maximum feerate)
10313 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10314 chan.context.feerate_per_kw = 9651180;
10315 chan.context.holder_dust_limit_satoshis = 546;
10316 chan.context.channel_type = cached_channel_type.clone();
10318 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
10319 "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
10320 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10322 // commitment tx with one output untrimmed (minimum feerate)
10323 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10324 chan.context.feerate_per_kw = 9651181;
10326 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
10327 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
10328 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10330 // anchors: commitment tx with one output untrimmed (minimum dust limit)
10331 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10332 chan.context.feerate_per_kw = 6216010;
10333 chan.context.holder_dust_limit_satoshis = 4001;
10334 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10336 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
10337 "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
10338 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10340 // commitment tx with fee greater than funder amount
10341 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10342 chan.context.feerate_per_kw = 9651936;
10343 chan.context.holder_dust_limit_satoshis = 546;
10344 chan.context.channel_type = cached_channel_type;
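// Informal observation on this vector: the expected transaction and signatures below are
// byte-for-byte identical to the "one output untrimmed (minimum feerate)" case above. Once
// the commitment fee would exceed what the funder's balance can cover, the funder cannot
// contribute any more, so the same single-output (to_remote only) commitment is produced.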
10346 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
10347 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
10348 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10350 // commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
10351 chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
10352 chan.context.feerate_per_kw = 253;
10353 chan.context.pending_inbound_htlcs.clear();
10354 chan.context.pending_inbound_htlcs.push({
10355 let mut out = InboundHTLCOutput{
10357 amount_msat: 2000000,
10359 payment_hash: PaymentHash([0; 32]),
10360 state: InboundHTLCState::Committed,
10362 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
10365 chan.context.pending_outbound_htlcs.clear();
10366 chan.context.pending_outbound_htlcs.push({
10367 let mut out = OutboundHTLCOutput{
10369 amount_msat: 5000001,
10371 payment_hash: PaymentHash([0; 32]),
10372 state: OutboundHTLCState::Committed,
10373 source: HTLCSource::dummy(),
10374 skimmed_fee_msat: None,
10375 blinding_point: None,
10377 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
10380 chan.context.pending_outbound_htlcs.push({
10381 let mut out = OutboundHTLCOutput{
10383 amount_msat: 5000000,
10385 payment_hash: PaymentHash([0; 32]),
10386 state: OutboundHTLCState::Committed,
10387 source: HTLCSource::dummy(),
10388 skimmed_fee_msat: None,
10389 blinding_point: None,
10391 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
10395 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
10396 "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
10397 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10400 "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
10401 "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
10402 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
10404 "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
10405 "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
10406 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
10408 "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
10409 "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
10410 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
10413 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10414 test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
10415 "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
10416 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10419 "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
10420 "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
10421 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
10423 "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
10424 "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
10425 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
10427 "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
10428 "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
10429 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
10434 fn test_per_commitment_secret_gen() {
10435 // Test vectors from BOLT 3 Appendix D:
10437 let mut seed = [0; 32];
10438 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
10439 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
10440 <Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);
10442 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
10443 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
10444 <Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);
10446 assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
10447 <Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);
10449 assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
10450 <Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);
10452 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
10453 assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
10454 <Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
10458 fn test_key_derivation() {
10459 // Test vectors from BOLT 3 Appendix E:
10460 let secp_ctx = Secp256k1::new();
10462 let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
10463 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
10465 let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
10466 assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);
10468 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
10469 assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
10471 assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
10472 SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());
10474 assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
10475 <Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);
10477 assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
10478 SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
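// For reference, the BOLT 3 Appendix E construction exercised above can be summarized as
// follows (informal sketch; `||` denotes concatenation of the 33-byte serialized points and
// `G` the secp256k1 generator):
//   localpubkey       = basepoint + SHA256(per_commitment_point || basepoint) * G
//   localprivkey      = basepoint_secret + SHA256(per_commitment_point || basepoint)
//   revocationpubkey  = revocation_basepoint * SHA256(revocation_basepoint || per_commitment_point)
//                       + per_commitment_point * SHA256(per_commitment_point || revocation_basepoint)
//   revocationprivkey = revocation_basepoint_secret * SHA256(revocation_basepoint || per_commitment_point)
//                       + per_commitment_secret * SHA256(per_commitment_point || revocation_basepoint)
// which is what `derive_private_key`, `RevocationKey::from_basepoint`, and
// `derive_private_revocation_key` are expected to compute.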
10482 fn test_zero_conf_channel_type_support() {
10483 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
10484 let secp_ctx = Secp256k1::new();
10485 let seed = [42; 32];
10486 let network = Network::Testnet;
10487 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
10488 let logger = test_utils::TestLogger::new();
10490 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
10491 let config = UserConfig::default();
10492 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
10493 node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
10495 let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
10496 channel_type_features.set_zero_conf_required();
10498 let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
10499 open_channel_msg.common_fields.channel_type = Some(channel_type_features);
10500 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
10501 let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
10502 node_b_node_id, &channelmanager::provided_channel_type_features(&config),
10503 &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
10504 assert!(res.is_ok());
10508 fn test_supports_anchors_zero_htlc_tx_fee() {
10509 // Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
10510 // resulting `channel_type`.
10511 let secp_ctx = Secp256k1::new();
10512 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
10513 let network = Network::Testnet;
10514 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
10515 let logger = test_utils::TestLogger::new();
10517 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
10518 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
10520 let mut config = UserConfig::default();
10521 config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
10523 // It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both
10524 // need to signal it.
10525 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
10526 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
10527 &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
10528 &config, 0, 42, None
10530 assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());
10532 let mut expected_channel_type = ChannelTypeFeatures::empty();
10533 expected_channel_type.set_static_remote_key_required();
10534 expected_channel_type.set_anchors_zero_fee_htlc_tx_required();
10536 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
10537 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
10538 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
10542 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
10543 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
10544 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
10545 &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
10546 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
10549 assert_eq!(channel_a.context.channel_type, expected_channel_type);
10550 assert_eq!(channel_b.context.channel_type, expected_channel_type);
10554 fn test_rejects_implicit_simple_anchors() {
10555 // Tests that if `option_anchors` is being negotiated implicitly through the intersection of
10556 // each side's `InitFeatures`, it is rejected.
10557 let secp_ctx = Secp256k1::new();
10558 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
10559 let network = Network::Testnet;
10560 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
10561 let logger = test_utils::TestLogger::new();
10563 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
10564 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
10566 let config = UserConfig::default();
10568 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
10569 let static_remote_key_required: u64 = 1 << 12;
10570 let simple_anchors_required: u64 = 1 << 20;
10571 let raw_init_features = static_remote_key_required | simple_anchors_required;
10572 let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
10574 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
10575 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
10576 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
10580 // Set `channel_type` to `None` to force the implicit feature negotiation.
10581 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
10582 open_channel_msg.common_fields.channel_type = None;
10584 // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
10585 // `static_remote_key`, it will fail the channel.
10586 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
10587 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
10588 &channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
10589 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
10591 assert!(channel_b.is_err());
10595 fn test_rejects_simple_anchors_channel_type() {
10596 // Tests that if `option_anchors` is being negotiated through the `channel_type` feature, it is rejected.
10598 let secp_ctx = Secp256k1::new();
10599 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
10600 let network = Network::Testnet;
10601 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
10602 let logger = test_utils::TestLogger::new();
10604 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
10605 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
10607 let config = UserConfig::default();
10609 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
10610 let static_remote_key_required: u64 = 1 << 12;
10611 let simple_anchors_required: u64 = 1 << 20;
10612 let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
10613 let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
10614 let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
10615 assert!(!simple_anchors_init.requires_unknown_bits());
10616 assert!(!simple_anchors_channel_type.requires_unknown_bits());

		// First, we'll try to open a channel between A and B where A requests a channel type for
		// the original `option_anchors` feature (non-zero-fee HTLC txs). This should be rejected by
		// B as it's not supported by LDK.
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.common_fields.channel_type = Some(simple_anchors_channel_type.clone());

		let res = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		);
		assert!(res.is_err());

		// Then, we'll try to open another channel where A requests a channel type for
		// `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
		// original `option_anchors` feature, which should be rejected by A as it's not supported by
		// LDK.
		let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
			10000000, 100000, 42, &config, 0, 42, None
		).unwrap();

		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));

		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		).unwrap();

		let mut accept_channel_msg = channel_b.get_accept_channel_message();
		accept_channel_msg.common_fields.channel_type = Some(simple_anchors_channel_type.clone());
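		// An acceptor is expected to echo back the `channel_type` it was offered; substituting a
		// different (and, here, unsupported) type must cause A to fail the handshake below.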

		let res = channel_a.accept_channel(
			&accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
		);
		assert!(res.is_err());
	}

	#[test]
	fn test_waiting_for_batch() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = ChainHash::using_genesis_block(network);
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let mut config = UserConfig::default();
		// Enable trust_own_funding_0conf, and check that we still don't send channel_ready for a
		// channel in a batch before all channels in the batch are ready.
		config.channel_handshake_limits.trust_own_funding_0conf = true;

		// Create a channel from node a to node b that will be part of batch funding.
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
			&feeest, &&keys_provider, &&keys_provider, node_b_node_id,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
			&feeest, &&keys_provider, &&keys_provider, node_b_node_id,
			&channelmanager::provided_channel_type_features(&config),
			&channelmanager::provided_init_features(&config),
			&open_channel_msg, 7, &config, 0, &&logger,
			true, // Allow node b to send a 0conf channel_ready.
		).unwrap();

		let accept_channel_msg = node_b_chan.accept_inbound_channel();
		node_a_chan.accept_channel(
			&accept_channel_msg,
			&config.channel_handshake_limits,
			&channelmanager::provided_init_features(&config),
		).unwrap();

		// Fund the channel with a batch funding transaction.
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction {
			version: 1,
			lock_time: LockTime::ZERO,
			input: Vec::new(),
			output: vec![
				TxOut { value: 10000000, script_pubkey: output_script.clone() },
				TxOut { value: 10000000, script_pubkey: Builder::new().into_script() },
			],
		};
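		// The second output (paying an unrelated, empty script) stands in for another channel's
		// funding output, so the transaction is shaped like a batch funding transaction.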
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let funding_created_msg = node_a_chan.get_funding_created(
			tx.clone(), funding_outpoint, true, &&logger,
		).map_err(|_| ()).unwrap();
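		// The `true` passed to `get_funding_created` above flags this funding as part of a batch,
		// which is what puts the channel into the WAITING_FOR_BATCH state asserted below.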
		let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
			&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger,
		).map_err(|_| ()).unwrap();
		let node_b_updates = node_b_chan.monitor_updating_restored(
			&&logger, &&keys_provider, chain_hash, &config, 0,
		);

		// Receive funding_signed; the channel is configured to hold off on sending channel_ready
		// and broadcasting the funding transaction until the whole batch is ready.
		let res = node_a_chan.funding_signed(
			&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger,
		);
		let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
		let node_a_updates = node_a_chan.monitor_updating_restored(
			&&logger, &&keys_provider, chain_hash, &config, 0,
		);
		// Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
		// as the funding transaction depends on all channels in the batch becoming ready.
		assert!(node_a_updates.channel_ready.is_none());
		assert!(node_a_updates.funding_broadcastable.is_none());
		assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));

		// It is possible to receive a 0conf channel_ready from the remote node.
		node_a_chan.channel_ready(
			&node_b_updates.channel_ready.unwrap(),
			&&keys_provider, chain_hash, &config, &best_block, &&logger,
		).unwrap();
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH | AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)
		);

		// The WAITING_FOR_BATCH flag is only cleared when the ChannelManager tells the channel the
		// batch is ready, via set_batch_ready.
		node_a_chan.set_batch_ready();
		assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
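		// With the batch flag cleared (and 0conf trusted), the channel can now produce its own
		// channel_ready.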
		assert!(node_a_chan.check_get_channel_ready(0).is_some());
	}