1 // This file is Copyright its original authors, visible in version control
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
10 use bitcoin::amount::Amount;
11 use bitcoin::blockdata::constants::ChainHash;
12 use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
13 use bitcoin::blockdata::transaction::Transaction;
15 use bitcoin::sighash::EcdsaSighashType;
16 use bitcoin::consensus::encode;
18 use bitcoin::hashes::Hash;
19 use bitcoin::hashes::sha256::Hash as Sha256;
20 use bitcoin::hashes::sha256d::Hash as Sha256d;
21 use bitcoin::hash_types::{Txid, BlockHash};
23 use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
24 use bitcoin::secp256k1::{PublicKey,SecretKey};
25 use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
26 use bitcoin::secp256k1;
28 use crate::ln::types::{ChannelId, PaymentPreimage, PaymentHash};
29 use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
31 use crate::ln::msgs::DecodeError;
32 use crate::ln::script::{self, ShutdownScript};
33 use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
34 use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
35 use crate::ln::chan_utils;
36 use crate::ln::onion_utils::HTLCFailReason;
37 use crate::chain::BestBlock;
38 use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
39 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
40 use crate::chain::transaction::{OutPoint, TransactionData};
41 use crate::sign::ecdsa::EcdsaChannelSigner;
42 use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
43 use crate::events::ClosureReason;
44 use crate::routing::gossip::NodeId;
45 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
46 use crate::util::logger::{Logger, Record, WithContext};
47 use crate::util::errors::APIError;
48 use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
49 use crate::util::scid_utils::scid_from_parts;
52 use crate::prelude::*;
53 use core::{cmp,mem,fmt};
55 #[cfg(any(test, fuzzing, debug_assertions))]
56 use crate::sync::Mutex;
57 use crate::sign::type_resolver::ChannelSignerType;
59 use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};
// Snapshot of a channel's value accounting, used for balance inspection.
// All `_msat`-suffixed fields are denominated in millisatoshis.
// NOTE(review): the struct's closing brace is not visible in this region — confirm it is intact.
62 pub struct ChannelValueStat {
63 pub value_to_self_msat: u64,
64 pub channel_value_msat: u64,
65 pub channel_reserve_msat: u64,
66 pub pending_outbound_htlcs_amount_msat: u64,
67 pub pending_inbound_htlcs_amount_msat: u64,
68 pub holding_cell_outbound_amount_msat: u64,
69 pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
70 pub counterparty_dust_limit_msat: u64,
// Capacity/limit figures describing what can currently be sent or received over the
// channel; every field is in millisatoshis.
73 pub struct AvailableBalances {
74 /// The amount that would go to us if we close the channel, ignoring any on-chain fees.
75 pub balance_msat: u64,
76 /// Total amount available for our counterparty to send to us.
77 pub inbound_capacity_msat: u64,
78 /// Total amount available for us to send to our counterparty.
79 pub outbound_capacity_msat: u64,
80 /// The maximum value we can assign to the next outbound HTLC
81 pub next_outbound_htlc_limit_msat: u64,
82 /// The minimum value we can assign to the next outbound HTLC
83 pub next_outbound_htlc_minimum_msat: u64,
86 #[derive(Debug, Clone, Copy, PartialEq)]
88 // Inbound states mirroring InboundHTLCState
90 AwaitingRemoteRevokeToAnnounce,
91 // Note that we do not have a AwaitingAnnouncedRemoteRevoke variant here as it is universally
92 // handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
93 // distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
94 // the fee update anywhere, we can simply consider the fee update `Committed` immediately
95 // instead of setting it to AwaitingAnnouncedRemoteRevoke.
97 // Outbound state can only be `LocalAnnounced` or `Committed`
// Why an inbound HTLC is being removed from our commitment transaction.
101 enum InboundHTLCRemovalReason {
// Failing back with an encrypted onion error packet to relay upstream.
102 FailRelay(msgs::OnionErrorPacket),
// Failing back as malformed; presumably (sha256_of_onion, failure_code) per BOLT #4 —
// NOTE(review): confirm the tuple element meaning against `update_fail_malformed_htlc` handling.
103 FailMalformed(([u8; 32], u16)),
// Claiming the HTLC with its payment preimage.
104 Fulfill(PaymentPreimage),
107 /// Represents the resolution status of an inbound HTLC.
// NOTE(review): the `Resolved {`/`Pending {` variant-opening lines and closing braces are not
// visible in this view; the fields below belong to those two variants respectively.
109 enum InboundHTLCResolution {
110 /// Resolved implies the action we must take with the inbound HTLC has already been determined,
111 /// i.e., we already know whether it must be failed back or forwarded.
113 // TODO: Once this variant is removed, we should also clean up
114 // [`MonitorRestoreUpdates::accepted_htlcs`] as the path will be unreachable.
116 pending_htlc_status: PendingHTLCStatus,
118 /// Pending implies we will attempt to resolve the inbound HTLC once it has been fully committed
119 /// to by both sides of the channel, i.e., once a `revoke_and_ack` has been processed by both
120 /// nodes for the state update in which it was proposed.
122 update_add_htlc: msgs::UpdateAddHTLC,
// Serialization: each variant's fields are written as a TLV stream (type 0 in each variant).
126 impl_writeable_tlv_based_enum!(InboundHTLCResolution,
128 (0, pending_htlc_status, required),
131 (0, update_add_htlc, required),
// Per-HTLC state machine for HTLCs our counterparty offered to us. Transitions follow the
// BOLT #2 update_add_htlc / commitment_signed / revoke_and_ack flow documented per-variant below.
// NOTE(review): the `Committed` variant line does not appear in this view, though it is
// referenced by the `From<&InboundHTLCState>` impl below — confirm it is intact.
135 enum InboundHTLCState {
136 /// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
137 /// update_add_htlc message for this HTLC.
138 RemoteAnnounced(InboundHTLCResolution),
139 /// Included in a received commitment_signed message (implying we've
140 /// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
141 /// state (see the example below). We have not yet included this HTLC in a
142 /// commitment_signed message because we are waiting on the remote's
143 /// aforementioned state revocation. One reason this missing remote RAA
144 /// (revoke_and_ack) blocks us from constructing a commitment_signed message
145 /// is because every time we create a new "state", i.e. every time we sign a
146 /// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
147 /// which are provided one-at-a-time in each RAA. E.g., the last RAA they
148 /// sent provided the per_commitment_point for our current commitment tx.
149 /// The other reason we should not send a commitment_signed without their RAA
150 /// is because their RAA serves to ACK our previous commitment_signed.
152 /// Here's an example of how an HTLC could come to be in this state:
153 /// remote --> update_add_htlc(prev_htlc) --> local
154 /// remote --> commitment_signed(prev_htlc) --> local
155 /// remote <-- revoke_and_ack <-- local
156 /// remote <-- commitment_signed(prev_htlc) <-- local
157 /// [note that here, the remote does not respond with a RAA]
158 /// remote --> update_add_htlc(this_htlc) --> local
159 /// remote --> commitment_signed(prev_htlc, this_htlc) --> local
160 /// Now `this_htlc` will be assigned this state. It's unable to be officially
161 /// accepted, i.e. included in a commitment_signed, because we're missing the
162 /// RAA that provides our next per_commitment_point. The per_commitment_point
163 /// is used to derive commitment keys, which are used to construct the
164 /// signatures in a commitment_signed message.
165 /// Implies AwaitingRemoteRevoke.
167 /// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
168 AwaitingRemoteRevokeToAnnounce(InboundHTLCResolution),
169 /// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
170 /// We have also included this HTLC in our latest commitment_signed and are now just waiting
171 /// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
172 /// channel (before it can then get forwarded and/or removed).
173 /// Implies AwaitingRemoteRevoke.
174 AwaitingAnnouncedRemoteRevoke(InboundHTLCResolution),
176 /// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
177 /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
179 /// Note that we have to keep an eye on the HTLC until we've received a broadcastable
180 /// commitment transaction without it as otherwise we'll have to force-close the channel to
181 /// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
182 /// anyway). That said, ChannelMonitor does this for us (see
183 /// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
184 /// our own local state before then, once we're sure that the next commitment_signed and
185 /// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
186 LocalRemoved(InboundHTLCRemovalReason),
189 /// Exposes the state of pending inbound HTLCs.
191 /// At a high level, an HTLC being forwarded from one Lightning node to another Lightning node goes
192 /// through the following states in the state machine:
193 /// - Announced for addition by the originating node through the update_add_htlc message.
194 /// - Added to the commitment transaction of the receiving node and originating node in turn
195 /// through the exchange of commitment_signed and revoke_and_ack messages.
196 /// - Announced for resolution (fulfillment or failure) by the receiving node through either one of
197 /// the update_fulfill_htlc, update_fail_htlc, and update_fail_malformed_htlc messages.
198 /// - Removed from the commitment transaction of the originating node and receiving node in turn
199 /// through the exchange of commitment_signed and revoke_and_ack messages.
201 /// This can be used to inspect what next message an HTLC is waiting for to advance its state.
202 #[derive(Clone, Debug, PartialEq)]
// NOTE(review): the `Committed` variant line is not visible in this view even though its doc
// comment is, and both the `From` impl and the TLV macro below reference it — confirm intact.
203 pub enum InboundHTLCStateDetails {
204 /// We have added this HTLC in our commitment transaction by receiving commitment_signed and
205 /// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
206 /// before this HTLC is included on the remote commitment transaction.
207 AwaitingRemoteRevokeToAdd,
208 /// This HTLC has been included in the commitment_signed and revoke_and_ack messages on both sides
209 /// and is included in both commitment transactions.
211 /// This HTLC is now safe to either forward or be claimed as a payment by us. The HTLC will
212 /// remain in this state until the forwarded upstream HTLC has been resolved and we resolve this
213 /// HTLC correspondingly, or until we claim it as a payment. If it is part of a multipart
214 /// payment, it will only be claimed together with other required parts.
216 /// We have received the preimage for this HTLC and it is being removed by fulfilling it with
217 /// update_fulfill_htlc. This HTLC is still on both commitment transactions, but we are awaiting
218 /// the appropriate revoke_and_ack's from the remote before this HTLC is removed from the remote
219 /// commitment transaction after update_fulfill_htlc.
220 AwaitingRemoteRevokeToRemoveFulfill,
221 /// The HTLC is being removed by failing it with update_fail_htlc or update_fail_malformed_htlc.
222 /// This HTLC is still on both commitment transactions, but we are awaiting the appropriate
223 /// revoke_and_ack's from the remote before this HTLC is removed from the remote commitment
225 AwaitingRemoteRevokeToRemoveFail,
// Maps the internal per-HTLC state machine to the public, coarser-grained details enum.
// `RemoteAnnounced` maps to `None` as such HTLCs are not yet part of any commitment tx.
228 impl From<&InboundHTLCState> for Option<InboundHTLCStateDetails> {
229 fn from(state: &InboundHTLCState) -> Option<InboundHTLCStateDetails> {
231 InboundHTLCState::RemoteAnnounced(_) => None,
232 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) =>
233 Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
234 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) =>
235 Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
236 InboundHTLCState::Committed =>
237 Some(InboundHTLCStateDetails::Committed),
238 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(_)) =>
239 Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail),
240 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(_)) =>
241 Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail),
242 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) =>
243 Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill),
// Serialization: even TLV type numbers, upgradable so newer variants read as `None` downstream.
248 impl_writeable_tlv_based_enum_upgradable!(InboundHTLCStateDetails,
249 (0, AwaitingRemoteRevokeToAdd) => {},
250 (2, Committed) => {},
251 (4, AwaitingRemoteRevokeToRemoveFulfill) => {},
252 (6, AwaitingRemoteRevokeToRemoveFail) => {};
// Internal record for a single inbound HTLC; several field lines (e.g. htlc_id, amount,
// cltv_expiry per the TLV macro below) are not visible in this view.
255 struct InboundHTLCOutput {
259 payment_hash: PaymentHash,
260 state: InboundHTLCState,
263 /// Exposes details around pending inbound HTLCs.
264 #[derive(Clone, Debug, PartialEq)]
265 pub struct InboundHTLCDetails {
267 /// The IDs are incremented by 1 starting from 0 for each offered HTLC.
268 /// They are unique per channel and inbound/outbound direction, unless an HTLC was only announced
269 /// and not part of any commitment transaction.
271 /// The amount in msat.
272 pub amount_msat: u64,
273 /// The block height at which this HTLC expires.
274 pub cltv_expiry: u32,
275 /// The payment hash.
276 pub payment_hash: PaymentHash,
277 /// The state of the HTLC in the state machine.
279 /// Determines on which commitment transactions the HTLC is included and what message the HTLC is
280 /// waiting for to advance to the next state.
282 /// See [`InboundHTLCStateDetails`] for information on the specific states.
284 /// LDK will always fill this field in, but when downgrading to prior versions of LDK, new
285 /// states may result in `None` here.
286 pub state: Option<InboundHTLCStateDetails>,
287 /// Whether the HTLC has an output below the local dust limit. If so, the output will be trimmed
288 /// from the local commitment transaction and added to the commitment transaction fee.
289 /// For non-anchor channels, this takes into account the cost of the second-stage HTLC
290 /// transactions as well.
292 /// When the local commitment transaction is broadcasted as part of a unilateral closure,
293 /// the value of this HTLC will therefore not be claimable but instead burned as a transaction
296 /// Note that dust limits are specific to each party. An HTLC can be dust for the local
297 /// commitment transaction but not for the counterparty's commitment transaction and vice versa.
// Serialization: odd type 7 keeps `state` optional for forward-compatible reads.
301 impl_writeable_tlv_based!(InboundHTLCDetails, {
302 (0, htlc_id, required),
303 (2, amount_msat, required),
304 (4, cltv_expiry, required),
305 (6, payment_hash, required),
306 (7, state, upgradable_option),
307 (8, is_dust, required),
// Per-HTLC state machine for HTLCs we offered to our counterparty.
// NOTE(review): the `Committed` variant line (between `LocalAnnounced` and `RemoteRemoved`)
// is not visible in this view, though the `From` impl below references it — confirm intact.
310 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
311 enum OutboundHTLCState {
312 /// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
313 /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
314 /// we will promote to Committed (note that they may not accept it until the next time we
315 /// revoke, but we don't really care about that:
316 /// * they've revoked, so worst case we can announce an old state and get our (option on)
317 /// money back (though we won't), and,
318 /// * we'll send them a revoke when they send a commitment_signed, and since only they're
319 /// allowed to remove it, the "can only be removed once committed on both sides" requirement
320 /// doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
321 /// we'll never get out of sync).
322 /// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
323 /// OutboundHTLCOutput's size just for a temporary bit
324 LocalAnnounced(Box<msgs::OnionPacket>),
326 /// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
327 /// the change (though they'll need to revoke before we fail the payment).
328 RemoteRemoved(OutboundHTLCOutcome),
329 /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
330 /// the remote side hasn't yet revoked their previous state, which we need them to do before we
331 /// can do any backwards failing. Implies AwaitingRemoteRevoke.
332 /// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
333 /// remote revoke_and_ack on a previous state before we can do so.
334 AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
335 /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
336 /// the remote side hasn't yet revoked their previous state, which we need them to do before we
337 /// can do any backwards failing. Implies AwaitingRemoteRevoke.
338 /// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
339 /// revoke_and_ack to drop completely.
340 AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
343 /// Exposes the state of pending outbound HTLCs.
345 /// At a high level, an HTLC being forwarded from one Lightning node to another Lightning node goes
346 /// through the following states in the state machine:
347 /// - Announced for addition by the originating node through the update_add_htlc message.
348 /// - Added to the commitment transaction of the receiving node and originating node in turn
349 /// through the exchange of commitment_signed and revoke_and_ack messages.
350 /// - Announced for resolution (fulfillment or failure) by the receiving node through either one of
351 /// the update_fulfill_htlc, update_fail_htlc, and update_fail_malformed_htlc messages.
352 /// - Removed from the commitment transaction of the originating node and receiving node in turn
353 /// through the exchange of commitment_signed and revoke_and_ack messages.
355 /// This can be used to inspect what next message an HTLC is waiting for to advance its state.
356 #[derive(Clone, Debug, PartialEq)]
// NOTE(review): the `Committed` variant line is not visible in this view even though its doc
// comment is, and both the `From` impl and the TLV macro below reference it — confirm intact.
357 pub enum OutboundHTLCStateDetails {
358 /// We are awaiting the appropriate revoke_and_ack's from the remote before the HTLC is added
359 /// on the remote's commitment transaction after update_add_htlc.
360 AwaitingRemoteRevokeToAdd,
361 /// The HTLC has been added to the remote's commitment transaction by sending commitment_signed
362 /// and receiving revoke_and_ack in return.
364 /// The HTLC will remain in this state until the remote node resolves the HTLC, or until we
365 /// unilaterally close the channel due to a timeout with an uncooperative remote node.
367 /// The HTLC has been fulfilled successfully by the remote with a preimage in update_fulfill_htlc,
368 /// and we removed the HTLC from our commitment transaction by receiving commitment_signed and
369 /// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
370 /// for the removal from its commitment transaction.
371 AwaitingRemoteRevokeToRemoveSuccess,
372 /// The HTLC has been failed by the remote with update_fail_htlc or update_fail_malformed_htlc,
373 /// and we removed the HTLC from our commitment transaction by receiving commitment_signed and
374 /// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
375 /// for the removal from its commitment transaction.
376 AwaitingRemoteRevokeToRemoveFailure,
// Maps the internal outbound state machine to the public details enum. Note this conversion is
// total (`OutboundHTLCStateDetails`, not an `Option`), unlike the inbound equivalent.
379 impl From<&OutboundHTLCState> for OutboundHTLCStateDetails {
380 fn from(state: &OutboundHTLCState) -> OutboundHTLCStateDetails {
382 OutboundHTLCState::LocalAnnounced(_) =>
383 OutboundHTLCStateDetails::AwaitingRemoteRevokeToAdd,
384 OutboundHTLCState::Committed =>
385 OutboundHTLCStateDetails::Committed,
386 // RemoteRemoved states are ignored as the state is transient and the remote has not committed to
388 OutboundHTLCState::RemoteRemoved(_) =>
389 OutboundHTLCStateDetails::Committed,
390 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) =>
391 OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess,
392 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Failure(_)) =>
393 OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFailure,
394 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) =>
395 OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess,
396 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Failure(_)) =>
397 OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFailure,
// Serialization: even TLV type numbers, upgradable so newer variants read as `None` downstream.
402 impl_writeable_tlv_based_enum_upgradable!(OutboundHTLCStateDetails,
403 (0, AwaitingRemoteRevokeToAdd) => {},
404 (2, Committed) => {},
405 (4, AwaitingRemoteRevokeToRemoveSuccess) => {},
406 (6, AwaitingRemoteRevokeToRemoveFailure) => {};
// Final outcome of a removed outbound HTLC: fulfilled (with preimage when known) or failed.
410 #[cfg_attr(test, derive(Debug, PartialEq))]
411 enum OutboundHTLCOutcome {
412 /// LDK version 0.0.105+ will always fill in the preimage here.
413 Success(Option<PaymentPreimage>),
414 Failure(HTLCFailReason),
// `None` means success (no failure reason), so a failure-less removal maps to
// `Success(None)` — i.e. a success whose preimage is not (yet) known.
417 impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
418 fn from(o: Option<HTLCFailReason>) -> Self {
420 None => OutboundHTLCOutcome::Success(None),
421 Some(r) => OutboundHTLCOutcome::Failure(r)
// Borrow the failure reason out of an outcome, if any. (Hand-written `Into` rather than `From`
// NOTE(review): implementing `From` on the target type would be the more idiomatic direction.)
426 impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
427 fn into(self) -> Option<&'a HTLCFailReason> {
429 OutboundHTLCOutcome::Success(_) => None,
430 OutboundHTLCOutcome::Failure(ref r) => Some(r)
// Internal record for a single outbound HTLC; several field lines (e.g. htlc_id, amount,
// cltv_expiry per the TLV macro below) are not visible in this view.
435 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
436 struct OutboundHTLCOutput {
440 payment_hash: PaymentHash,
441 state: OutboundHTLCState,
443 blinding_point: Option<PublicKey>,
444 skimmed_fee_msat: Option<u64>,
447 /// Exposes details around pending outbound HTLCs.
448 #[derive(Clone, Debug, PartialEq)]
449 pub struct OutboundHTLCDetails {
451 /// The IDs are incremented by 1 starting from 0 for each offered HTLC.
452 /// They are unique per channel and inbound/outbound direction, unless an HTLC was only announced
453 /// and not part of any commitment transaction.
455 /// Not present when we are awaiting a remote revocation and the HTLC is not added yet.
456 pub htlc_id: Option<u64>,
457 /// The amount in msat.
458 pub amount_msat: u64,
459 /// The block height at which this HTLC expires.
460 pub cltv_expiry: u32,
461 /// The payment hash.
462 pub payment_hash: PaymentHash,
463 /// The state of the HTLC in the state machine.
465 /// Determines on which commitment transactions the HTLC is included and what message the HTLC is
466 /// waiting for to advance to the next state.
468 /// See [`OutboundHTLCStateDetails`] for information on the specific states.
470 /// LDK will always fill this field in, but when downgrading to prior versions of LDK, new
471 /// states may result in `None` here.
472 pub state: Option<OutboundHTLCStateDetails>,
473 /// The extra fee being skimmed off the top of this HTLC.
474 pub skimmed_fee_msat: Option<u64>,
475 /// Whether the HTLC has an output below the local dust limit. If so, the output will be trimmed
476 /// from the local commitment transaction and added to the commitment transaction fee.
477 /// For non-anchor channels, this takes into account the cost of the second-stage HTLC
478 /// transactions as well.
480 /// When the local commitment transaction is broadcasted as part of a unilateral closure,
481 /// the value of this HTLC will therefore not be claimable but instead burned as a transaction
484 /// Note that dust limits are specific to each party. An HTLC can be dust for the local
485 /// commitment transaction but not for the counterparty's commitment transaction and vice versa.
// Serialization: odd type 7 keeps `state` optional for forward-compatible reads.
489 impl_writeable_tlv_based!(OutboundHTLCDetails, {
490 (0, htlc_id, required),
491 (2, amount_msat, required),
492 (4, cltv_expiry, required),
493 (6, payment_hash, required),
494 (7, state, upgradable_option),
495 (8, skimmed_fee_msat, required),
496 (10, is_dust, required),
499 /// See AwaitingRemoteRevoke ChannelState for more info
// Updates queued in the holding cell while we await the counterparty's revoke_and_ack.
// NOTE(review): only the `AddHTLC` variant opener is visible here; the later fields
// (payment_preimage / err_packet / sha256_of_onion) presumably belong to claim/fail/
// fail-malformed variants whose opening lines are not visible in this view — confirm.
500 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
501 enum HTLCUpdateAwaitingACK {
502 AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
506 payment_hash: PaymentHash,
508 onion_routing_packet: msgs::OnionPacket,
509 // The extra fee we're skimming off the top of this HTLC.
510 skimmed_fee_msat: Option<u64>,
511 blinding_point: Option<PublicKey>,
514 payment_preimage: PaymentPreimage,
519 err_packet: msgs::OnionErrorPacket,
524 sha256_of_onion: [u8; 32],
// Generates a u32-bitflag newtype for a `ChannelState` variant, with per-flag getters/
// setters/clearers, bit operators, a validating `from_u32`, and — for FUNDED_STATE types —
// interop (operators, PartialEq, From) with the shared `FundedStateFlags` bits.
528 macro_rules! define_state_flags {
529 ($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr, $get: ident, $set: ident, $clear: ident)),+], $extra_flags: expr) => {
530 #[doc = $flag_type_doc]
531 #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
532 struct $flag_type(u32);
537 const $flag: $flag_type = $flag_type($value);
540 /// All flags that apply to the specified [`ChannelState`] variant.
542 const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags);
545 fn new() -> Self { Self(0) }
548 fn from_u32(flags: u32) -> Result<Self, ()> {
549 if flags & !Self::ALL.0 != 0 {
552 Ok($flag_type(flags))
557 fn is_empty(&self) -> bool { self.0 == 0 }
559 fn is_set(&self, flag: Self) -> bool { *self & flag == flag }
561 fn set(&mut self, flag: Self) { *self |= flag }
563 fn clear(&mut self, flag: Self) -> Self { self.0 &= !flag.0; *self }
567 define_state_flags!($flag_type, Self::$flag, $get, $set, $clear);
570 impl core::ops::BitOr for $flag_type {
572 fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
574 impl core::ops::BitOrAssign for $flag_type {
575 fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; }
577 impl core::ops::BitAnd for $flag_type {
579 fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) }
581 impl core::ops::BitAndAssign for $flag_type {
582 fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; }
// Arm: same as the primary arm but with no extra (shared) flags beyond those listed.
585 ($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
586 define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
// Arm: emits the named getter/setter/clearer trio for one flag.
588 ($flag_type: ident, $flag: expr, $get: ident, $set: ident, $clear: ident) => {
591 fn $get(&self) -> bool { self.is_set($flag_type::new() | $flag) }
593 fn $set(&mut self) { self.set($flag_type::new() | $flag) }
595 fn $clear(&mut self) -> Self { self.clear($flag_type::new() | $flag) }
// Arm: funded-state flag types additionally carry all FundedStateFlags bits and accessors.
598 ($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
599 define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);
601 define_state_flags!($flag_type, FundedStateFlags::PEER_DISCONNECTED,
602 is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected);
603 define_state_flags!($flag_type, FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS,
604 is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress);
605 define_state_flags!($flag_type, FundedStateFlags::REMOTE_SHUTDOWN_SENT,
606 is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent);
607 define_state_flags!($flag_type, FundedStateFlags::LOCAL_SHUTDOWN_SENT,
608 is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent);
610 impl core::ops::BitOr<FundedStateFlags> for $flag_type {
612 fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
614 impl core::ops::BitOrAssign<FundedStateFlags> for $flag_type {
615 fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; }
617 impl core::ops::BitAnd<FundedStateFlags> for $flag_type {
619 fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) }
621 impl core::ops::BitAndAssign<FundedStateFlags> for $flag_type {
622 fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; }
624 impl PartialEq<FundedStateFlags> for $flag_type {
625 fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 }
627 impl From<FundedStateFlags> for $flag_type {
628 fn from(flags: FundedStateFlags) -> Self { Self(flags.0) }
633 /// We declare all the states/flags here together to help determine which bits are still available
// Raw bit values for every channel-state flag, one unique bit each (1 << 0 through 1 << 13).
// NOTE(review): later code references these as `state_flags::…`, but the enclosing
// `mod state_flags {` line and its closing brace are not visible in this view — confirm intact.
636 pub const OUR_INIT_SENT: u32 = 1 << 0;
637 pub const THEIR_INIT_SENT: u32 = 1 << 1;
638 pub const FUNDING_NEGOTIATED: u32 = 1 << 2;
639 pub const AWAITING_CHANNEL_READY: u32 = 1 << 3;
640 pub const THEIR_CHANNEL_READY: u32 = 1 << 4;
641 pub const OUR_CHANNEL_READY: u32 = 1 << 5;
642 pub const CHANNEL_READY: u32 = 1 << 6;
643 pub const PEER_DISCONNECTED: u32 = 1 << 7;
644 pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8;
645 pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9;
646 pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10;
647 pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11;
648 pub const SHUTDOWN_COMPLETE: u32 = 1 << 12;
649 pub const WAITING_FOR_BATCH: u32 = 1 << 13;
// Invocations of `define_state_flags!` producing the concrete flag types: FundedStateFlags,
// NegotiatingFundingFlags, AwaitingChannelReadyFlags, and ChannelReadyFlags.
// NOTE(review): the `define_state_flags!(` opener lines and some closing `]);` lines of these
// invocations are not visible in this view — confirm they are intact before building.
653 "Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
655 ("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
656 until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED,
657 is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected),
658 ("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
659 somewhere and we should pause sending any outbound messages until they've managed to \
660 complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS,
661 is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress),
662 ("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
663 any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
664 message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT,
665 is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent),
666 ("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
667 the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT,
668 is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent)
673 "Flags that only apply to [`ChannelState::NegotiatingFunding`].",
674 NegotiatingFundingFlags, [
675 ("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
676 OUR_INIT_SENT, state_flags::OUR_INIT_SENT, is_our_init_sent, set_our_init_sent, clear_our_init_sent),
677 ("Indicates we have received their `open_channel`/`accept_channel` message.",
678 THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT, is_their_init_sent, set_their_init_sent, clear_their_init_sent)
683 "Flags that only apply to [`ChannelState::AwaitingChannelReady`].",
684 FUNDED_STATE, AwaitingChannelReadyFlags, [
685 ("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
686 `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
687 THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY,
688 is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready),
689 ("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
690 `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
691 OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY,
692 is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready),
693 ("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
694 is being held until all channels in the batch have received `funding_signed` and have \
695 their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH,
696 is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch)
701 "Flags that only apply to [`ChannelState::ChannelReady`].",
702 FUNDED_STATE, ChannelReadyFlags, [
703 ("Indicates that we have sent a `commitment_signed` but are awaiting the responding \
704 `revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
705 messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
706 implicit ACK, so instead we have to hold them away temporarily to be sent later.",
707 AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE,
708 is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke)
// State machine for a channel's lifecycle: NegotiatingFunding -> (FundingNegotiated) ->
// AwaitingChannelReady -> ChannelReady -> (ShutdownComplete). The `PartialOrd` derive makes
// variant declaration order meaningful for state comparisons.
// NOTE(review): the enum opener and two variant lines (`FundingNegotiated`,
// `ShutdownComplete`) appear elided from this excerpt; they are referenced by the
// `from_u32`/`to_u32` conversions below.
712 // Note that the order of this enum is implicitly defined by where each variant is placed. Take this
713 // into account when introducing new states and update `test_channel_state_order` accordingly.
714 #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
716 /// We are negotiating the parameters required for the channel prior to funding it.
717 NegotiatingFunding(NegotiatingFundingFlags),
718 /// We have sent `funding_created` and are awaiting a `funding_signed` to advance to
719 /// `AwaitingChannelReady`. Note that this is nonsense for an inbound channel as we immediately generate
720 /// `funding_signed` upon receipt of `funding_created`, so simply skip this state.
722 /// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the
723 /// funding transaction to confirm.
724 AwaitingChannelReady(AwaitingChannelReadyFlags),
725 /// Both we and our counterparty consider the funding transaction confirmed and the channel is
727 ChannelReady(ChannelReadyFlags),
728 /// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager`
729 /// is about to drop us, but we store this anyway.
// Generates getter/setter/clearer methods on `ChannelState` that delegate to the flags of the
// listed variant(s), `debug_assert!`-ing (and doing nothing in release builds) when called on
// any other variant. `FUNDED_STATES` is shorthand for both funded variants
// (`AwaitingChannelReady` and `ChannelReady`).
733 macro_rules! impl_state_flag {
734 ($get: ident, $set: ident, $clear: ident, [$($state: ident),+]) => {
736 fn $get(&self) -> bool {
739 ChannelState::$state(flags) => flags.$get(),
748 ChannelState::$state(flags) => flags.$set(),
750 _ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
754 fn $clear(&mut self) {
757 ChannelState::$state(flags) => { let _ = flags.$clear(); },
759 _ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
// Shorthand arm: expand over both funded variants.
763 ($get: ident, $set: ident, $clear: ident, FUNDED_STATES) => {
764 impl_state_flag!($get, $set, $clear, [AwaitingChannelReady, ChannelReady]);
// Single-variant convenience arm.
766 ($get: ident, $set: ident, $clear: ident, $state: ident) => {
767 impl_state_flag!($get, $set, $clear, [$state]);
// Deserializes a `ChannelState` from its packed u32 form: the two flag-less states are exact
// constants; otherwise a state marker bit selects the variant and the remaining bits are that
// variant's flags. Order matters: `AWAITING_CHANNEL_READY` is checked before `CHANNEL_READY`.
772 fn from_u32(state: u32) -> Result<Self, ()> {
774 state_flags::FUNDING_NEGOTIATED => Ok(ChannelState::FundingNegotiated),
775 state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete),
777 if val & state_flags::AWAITING_CHANNEL_READY == state_flags::AWAITING_CHANNEL_READY {
778 AwaitingChannelReadyFlags::from_u32(val & !state_flags::AWAITING_CHANNEL_READY)
779 .map(|flags| ChannelState::AwaitingChannelReady(flags))
780 } else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY {
781 ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY)
782 .map(|flags| ChannelState::ChannelReady(flags))
783 } else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) {
784 Ok(ChannelState::NegotiatingFunding(flags))
// Inverse of `from_u32`: pack the state marker bit together with the variant's flag bits.
792 fn to_u32(&self) -> u32 {
794 ChannelState::NegotiatingFunding(flags) => flags.0,
795 ChannelState::FundingNegotiated => state_flags::FUNDING_NEGOTIATED,
796 ChannelState::AwaitingChannelReady(flags) => state_flags::AWAITING_CHANNEL_READY | flags.0,
797 ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0,
798 ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE,
// True for the two states reached before the funding transaction exists.
802 fn is_pre_funded_state(&self) -> bool {
803 matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingNegotiated)
// True once each side has sent its `shutdown` message.
806 fn is_both_sides_shutdown(&self) -> bool {
807 self.is_local_shutdown_sent() && self.is_remote_shutdown_sent()
// Extracts only the flags shared by both funded states; empty for non-funded states.
810 fn with_funded_state_flags_mask(&self) -> FundedStateFlags {
812 ChannelState::AwaitingChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
813 ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
814 _ => FundedStateFlags::new(),
// We can only issue a new `commitment_signed` in `ChannelReady` when no RAA is outstanding,
// no monitor update is in flight, and the peer is connected.
818 fn can_generate_new_commitment(&self) -> bool {
820 ChannelState::ChannelReady(flags) =>
821 !flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) &&
822 !flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) &&
823 !flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
825 debug_assert!(false, "Can only generate new commitment within ChannelReady");
// Per-flag accessor methods, generated by `impl_state_flag!` above.
831 impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected, FUNDED_STATES);
832 impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress, FUNDED_STATES);
833 impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent, FUNDED_STATES);
834 impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent, FUNDED_STATES);
835 impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready, AwaitingChannelReady);
836 impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready, AwaitingChannelReady);
837 impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch, AwaitingChannelReady);
838 impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke, ChannelReady);
/// Commitment numbers count down from this value (2^48 - 1); BOLT 3's obscured commitment
/// number scheme uses 48 bits.
pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

/// The default cap on the number of HTLCs in flight at once.
pub const DEFAULT_MAX_HTLCS: u16 = 50;
845 pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
846 const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
847 const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
848 if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
// Weight added to a commitment transaction per non-dust HTLC output.
// NOTE(review): these two same-named consts are presumably gated by `#[cfg(...)]` attributes
// on elided lines (e.g. a non-test private version and a test-visible `pub` version — the
// usual LDK pattern); confirm against the full source, as shown here they would conflict.
852 const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
854 pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
/// The value, in satoshis, of each of the two anchor outputs added to anchor-outputs
/// commitment transactions.
pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
/// Before the percentage became configurable (in LDK 0.0.107),
/// `holder_max_htlc_value_in_flight_msat` was always set to this percent of the channel
/// value. Note that LDK 0.0.104+ could already serialize channels carrying a different value
/// for `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;
/// The largest `funding_satoshis` the BOLT #2 specification permits when the peer does not
/// support `option_support_large_channel` (aka wumbo channels): 2^24 - 1 satoshis.
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;
/// Total bitcoin supply in satoshis: 21 million BTC at 100,000,000 sat per BTC.
// Digit grouping normalized to the conventional sat/BTC form; value unchanged.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 100_000_000;
/// The maximum network dust limit for standard script formats. This is currently the smallest
/// P2SH output value below which Bitcoin Core 22 treats the entire transaction as
/// non-standard and thus refuses to relay it.
///
/// We also use this as the largest counterparty `dust_limit_satoshis` we will accept, given
/// many implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;

/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;
/// The dust limit applies both to commitment-transaction outputs and to closing-transaction
/// outputs. For cooperative closes we require segwit outputs (accepting *any* segwit script,
/// which may be up to 42 bytes long). To avoid worrying about standardness during the closing
/// process, we simply require the counterparty's dust limit to keep every segwit output above
/// the network dust threshold.
///
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;

// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
894 /// Used to return a simple Error back to ChannelManager. Will get converted to a
895 /// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
896 /// channel_id in ChannelManager.
// NOTE(review): the variant lines appear elided from this excerpt; per the `Debug`/`Display`
// impls below they include at least `Ignore`, `Warn`, and `Close`, each carrying a
// `Display`-able payload.
897 pub(super) enum ChannelError {
903 impl fmt::Debug for ChannelError {
904 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
906 &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
907 &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
908 &ChannelError::Close(ref e) => write!(f, "Close : {}", e),
913 impl fmt::Display for ChannelError {
914 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
916 &ChannelError::Ignore(ref e) => write!(f, "{}", e),
917 &ChannelError::Warn(ref e) => write!(f, "{}", e),
918 &ChannelError::Close(ref e) => write!(f, "{}", e),
// A `Logger` adapter that stamps every emitted `Record` with the channel's peer id, channel
// id, and (optionally) payment hash — see the `Logger` impl below.
// NOTE(review): one field line appears elided here; the impl below reads `self.logger`, so the
// wrapped logger reference presumably lives on that elided line.
923 pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
925 pub peer_id: Option<PublicKey>,
926 pub channel_id: Option<ChannelId>,
927 pub payment_hash: Option<PaymentHash>,
930 impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
931 fn log(&self, mut record: Record) {
932 record.peer_id = self.peer_id;
933 record.channel_id = self.channel_id;
934 record.payment_hash = self.payment_hash;
935 self.logger.log(record)
// Constructor: builds a `WithChannelContext` whose peer/channel ids are taken from the given
// `ChannelContext`. NOTE(review): the struct-literal opening and remaining field initializers
// (the wrapped logger, `payment_hash`) appear elided from this excerpt.
939 impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
940 where L::Target: Logger {
941 pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>, payment_hash: Option<PaymentHash>) -> Self
942 where S::Target: SignerProvider
946 peer_id: Some(context.counterparty_node_id),
947 channel_id: Some(context.channel_id),
// Evaluates a secp256k1 `Result`, early-returning `Err(ChannelError::Close($err))` from the
// enclosing function on failure. NOTE(review): the `Ok(..)` arm and the match opener appear
// elided from this excerpt.
953 macro_rules! secp_check {
954 ($res: expr, $err: expr) => {
957 Err(_) => return Err(ChannelError::Close($err)),
962 /// The "channel disabled" bit in channel_update must be set based on whether we are connected to
963 /// our counterparty or not. However, we don't want to announce updates right away to avoid
964 /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
965 /// our channel_update message and track the current state here.
966 /// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
967 #[derive(Clone, Copy, PartialEq)]
968 pub(super) enum ChannelUpdateStatus {
// Four states are documented below; the variant identifier lines themselves appear elided
// from this excerpt (enabled / staged-disabled / staged-enabled / disabled).
969 /// We've announced the channel as enabled and are connected to our peer.
971 /// Our channel is no longer live, but we haven't announced the channel as disabled yet.
973 /// Our channel is live again, but we haven't announced the channel as enabled yet.
975 /// We've announced the channel as disabled.
979 /// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
// NOTE(review): the variant identifier lines appear elided from this excerpt; only their doc
// comments are visible. The progression is: not-sent -> sent -> committed -> acked.
981 pub enum AnnouncementSigsState {
982 /// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
983 /// we sent the last `AnnouncementSignatures`.
985 /// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
986 /// This state never appears on disk - instead we write `NotSent`.
988 /// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
989 /// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
990 /// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
991 /// they send back a `RevokeAndACK`.
992 /// This state never appears on disk - instead we write `NotSent`.
994 /// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
995 /// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
999 /// An enum indicating whether the local or remote side offered a given HTLC.
// NOTE(review): the variant lines (local/remote sides) appear elided from this excerpt.
1000 enum HTLCInitiator {
1005 /// Current counts of various HTLCs, useful for calculating current balances available exactly.
// NOTE(review): the struct header line appears elided from this excerpt.
// Counts of HTLCs pending in each direction:
1007 pending_inbound_htlcs: usize,
1008 pending_outbound_htlcs: usize,
// Total msat value pending in each direction:
1009 pending_inbound_htlcs_value_msat: u64,
1010 pending_outbound_htlcs_value_msat: u64,
// Value exposed as dust on each side's commitment transaction:
1011 on_counterparty_tx_dust_exposure_msat: u64,
1012 on_holder_tx_dust_exposure_msat: u64,
// Outbound HTLCs currently parked in the holding cell:
1013 outbound_holding_cell_msat: u64,
1014 on_holder_tx_outbound_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
1017 /// An enum gathering stats on commitment transaction, either local or remote.
1018 struct CommitmentStats<'a> {
1019 tx: CommitmentTransaction, // the transaction info
1020 feerate_per_kw: u32, // the feerate included to build the transaction
1021 total_fee_sat: u64, // the total fee included in the transaction
1022 num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
1023 htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
1024 local_balance_msat: u64, // local balance before fees *not* considering dust limits
1025 remote_balance_msat: u64, // remote balance before fees *not* considering dust limits
1026 outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
1027 inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
1030 /// Used when calculating whether we or the remote can afford an additional HTLC.
// A prospective HTLC described by its amount and which side would offer it.
// NOTE(review): the amount field line and the constructor body appear elided from this
// excerpt.
1031 struct HTLCCandidate {
1033 origin: HTLCInitiator,
1036 impl HTLCCandidate {
// Simple field-for-field constructor.
1037 fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
1045 /// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
// NOTE(review): the variant structure lines are partially elided here; the visible fields
// belong to the "new claim" variant (monitor update, claimed value, and the optional
// update_fulfill message to send).
1047 enum UpdateFulfillFetch {
1049 monitor_update: ChannelMonitorUpdate,
1050 htlc_value_msat: u64,
1051 msg: Option<msgs::UpdateFulfillHTLC>,
1056 /// The return type of get_update_fulfill_htlc_and_commit.
1057 pub enum UpdateFulfillCommitFetch {
1058 /// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
1059 /// it in the holding cell, or re-generated the update_fulfill message after the same claim was
1060 /// previously placed in the holding cell (and has since been removed).
// NOTE(review): the variant identifier lines appear elided from this excerpt; the fields
// below belong to the "new claim" variant.
1062 /// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
1063 monitor_update: ChannelMonitorUpdate,
1064 /// The value of the HTLC which was claimed, in msat.
1065 htlc_value_msat: u64,
1067 /// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
1068 /// or has been forgotten (presumably previously claimed).
1072 /// The return value of `monitor_updating_restored`
// Everything that was held back while a monitor update was in flight and must now be acted
// on: messages to (re)send, HTLC resolutions to surface, and a funding tx to broadcast.
1073 pub(super) struct MonitorRestoreUpdates {
1074 pub raa: Option<msgs::RevokeAndACK>,
1075 pub commitment_update: Option<msgs::CommitmentUpdate>,
// Which of RAA/commitment-update must be sent first on resumption.
1076 pub order: RAACommitmentOrder,
1077 pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
1078 pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
1079 pub finalized_claimed_htlcs: Vec<HTLCSource>,
1080 pub pending_update_adds: Vec<msgs::UpdateAddHTLC>,
1081 pub funding_broadcastable: Option<Transaction>,
1082 pub channel_ready: Option<msgs::ChannelReady>,
1083 pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
1086 /// The return value of `signer_maybe_unblocked`
// Messages that were deferred while the signer was unavailable and can now be sent.
// NOTE(review): additional field lines appear elided from this excerpt.
1088 pub(super) struct SignerResumeUpdates {
1089 pub commitment_update: Option<msgs::CommitmentUpdate>,
1090 pub funding_signed: Option<msgs::FundingSigned>,
1091 pub channel_ready: Option<msgs::ChannelReady>,
1094 /// The return value of `channel_reestablish`
// Messages to (re)send to the peer after processing their `channel_reestablish`.
1095 pub(super) struct ReestablishResponses {
1096 pub channel_ready: Option<msgs::ChannelReady>,
1097 pub raa: Option<msgs::RevokeAndACK>,
1098 pub commitment_update: Option<msgs::CommitmentUpdate>,
// Which of RAA/commitment-update must be retransmitted first.
1099 pub order: RAACommitmentOrder,
1100 pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
1101 pub shutdown_msg: Option<msgs::Shutdown>,
1104 /// The result of a shutdown that should be handled.
// Bundles everything the `ChannelManager` needs to finish tearing a channel down.
1106 pub(crate) struct ShutdownResult {
// Why the channel is closing, as reported to the user.
1107 pub(crate) closure_reason: ClosureReason,
1108 /// A channel monitor update to apply.
1109 pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelId, ChannelMonitorUpdate)>,
1110 /// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
1111 pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
1112 /// An unbroadcasted batch funding transaction id. The closure of this channel should be
1113 /// propagated to the remainder of the batch.
1114 pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
1115 pub(crate) channel_id: ChannelId,
1116 pub(crate) user_channel_id: u128,
1117 pub(crate) channel_capacity_satoshis: u64,
1118 pub(crate) counterparty_node_id: PublicKey,
1119 pub(crate) unbroadcasted_funding_tx: Option<Transaction>,
1120 pub(crate) channel_funding_txo: Option<OutPoint>,
/// If most of a channel's funds sit with the fundee while the initiator holds only just
/// enough to cover its reserve, the channel risks getting "stuck": the initiator controls the
/// feerate, so a fee increase could leave it with no balance while the fundee cannot send a
/// payment either, as the fee bump more than drains the fundee's slack above the reserve.
/// Neither side could then add a new HTLC and the channel would become useless.
///
/// To avoid this, before sending an HTLC as the initiator, we check that the feerate could
/// grow by this multiple without hitting that case. There is no obviously correct value here:
/// a larger multiple keeps the channel usable across bigger feerate jumps (useful when no new
/// HTLCs flow for days), but forces a bigger held-back reserve, making the channel less
/// usable day to day.
#[cfg(any(fuzzing, test))]
pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
#[cfg(not(any(fuzzing, test)))]
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;

/// If we fail to see an inbound channel's funding transaction confirm on-chain within this
/// many blocks of channel creation, we simply force-close and move on. This is the value
/// suggested in BOLT 2.
pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;
/// If our counterparty concurrently proposes `update_add_htlc`s, we might not have enough
/// balance remaining to cover the on-chain cost of the new HTLC weight, in which case the
/// counterparty rejects our `commitment_signed` including that HTLC.
///
/// To reduce the odds of this, we compute our outbound `update_fee` with an HTLC buffer of
/// size 2. If more `update_add_htlc`s than that arrive concurrently, a force-close can still
/// result; ultimately this is an issue stemming from the asynchronous-update design of the
/// LN state machines.
pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;

/// On channel open we check that the funding amount can pay the relevant commitment
/// transaction fees with at least this many HTLCs present on the commitment transaction
/// (not counting the HTLCs' own value).
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;

/// When a [`Channel`]'s [`ChannelConfig`] is updated, the previous config is stashed for up
/// to this many timer ticks so that HTLCs forwarded by nodes which have not yet seen the new
/// `ChannelUpdate` are still accepted. This value was determined from:
///
/// * the expected interval between ticks (1 minute), and
/// * the average network-wide convergence delay of an update, ~300 seconds on average per
///   `<https://arxiv.org/pdf/2205.12737.pdf>`,
/// * giving `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval.
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;

/// The number of ticks that may elapse while we are waiting for a response to a
/// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to
/// disconnect the peer.
///
/// See [`ChannelContext::sent_message_awaiting_response`] for more information.
pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;

/// The number of ticks an unfunded outbound/inbound channel may wait to be promoted to a
/// [`Channel`] after creation; an unfunded channel exceeding this age limit is force-closed
/// and purged from memory.
pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;

/// Number of blocks needed for an output from a coinbase transaction to be spendable.
pub(crate) const COINBASE_MATURITY: u32 = 100;
// Wrapper around a `ChannelMonitorUpdate` that has been generated but not yet handed off /
// completed — presumably queued for later application; TODO confirm against full source.
1185 struct PendingChannelMonitorUpdate {
1186 update: ChannelMonitorUpdate,
// TLV-based serialization: the update is stored as required TLV field 0.
1189 impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
1190 (0, update, required),
1193 /// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
1194 /// its variants containing an appropriate channel struct.
// The V2 (dual-funded) variants only exist when built with the `dual_funding` or `splicing`
// cfg flags.
1195 pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
1196 UnfundedOutboundV1(OutboundV1Channel<SP>),
1197 UnfundedInboundV1(InboundV1Channel<SP>),
1198 #[cfg(any(dual_funding, splicing))]
1199 UnfundedOutboundV2(OutboundV2Channel<SP>),
1200 #[cfg(any(dual_funding, splicing))]
1201 UnfundedInboundV2(InboundV2Channel<SP>),
1202 Funded(Channel<SP>),
1205 impl<'a, SP: Deref> ChannelPhase<SP> where
1206 SP::Target: SignerProvider,
1207 <SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
// Shared read-only access to the `ChannelContext` regardless of which phase the channel is
// in — every variant's inner struct exposes a `context` field.
1209 pub fn context(&'a self) -> &'a ChannelContext<SP> {
1211 ChannelPhase::Funded(chan) => &chan.context,
1212 ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
1213 ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
1214 #[cfg(any(dual_funding, splicing))]
1215 ChannelPhase::UnfundedOutboundV2(chan) => &chan.context,
1216 #[cfg(any(dual_funding, splicing))]
1217 ChannelPhase::UnfundedInboundV2(chan) => &chan.context,
// Mutable counterpart of `context` above.
1221 pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
1223 ChannelPhase::Funded(ref mut chan) => &mut chan.context,
1224 ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
1225 ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
1226 #[cfg(any(dual_funding, splicing))]
1227 ChannelPhase::UnfundedOutboundV2(ref mut chan) => &mut chan.context,
1228 #[cfg(any(dual_funding, splicing))]
1229 ChannelPhase::UnfundedInboundV2(ref mut chan) => &mut chan.context,
1234 /// Contains all state common to unfunded inbound/outbound channels.
1235 pub(super) struct UnfundedChannelContext {
1236 /// A counter tracking how many ticks have elapsed since this unfunded channel was
1237 /// created. If the peer has yet to respond by the time this counter reaches
1238 /// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, it will be force-closed and purged from memory.
1240 /// This is so that we don't keep channels around that haven't progressed to a funded state
1241 /// in a timely manner.
1242 unfunded_channel_age_ticks: usize,
1245 impl UnfundedChannelContext {
1246 /// Determines whether we should force-close and purge this unfunded channel from memory due to it
1247 /// having reached the unfunded channel age limit.
1249 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
1250 pub fn should_expire_unfunded_channel(&mut self) -> bool {
1251 self.unfunded_channel_age_ticks += 1;
1252 self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
1256 /// Contains everything about the channel including state, and various flags.
1257 pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
1258 config: LegacyChannelConfig,
1260 // Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
1261 // constructed using it. The second element in the tuple corresponds to the number of ticks that
1262 // have elapsed since the update occurred.
1263 prev_config: Option<(ChannelConfig, usize)>,
1265 inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,
1269 /// The current channel ID.
1270 channel_id: ChannelId,
1271 /// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
1272 /// Will be `None` for channels created prior to 0.0.115.
1273 temporary_channel_id: Option<ChannelId>,
1274 channel_state: ChannelState,
1276 // When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
1277 // our peer. However, we want to make sure they received it, or else rebroadcast it when we
1279 // We do so here, see `AnnouncementSigsSent` for more details on the state(s).
1280 // Note that a number of our tests were written prior to the behavior here which retransmits
1281 // AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
1283 #[cfg(any(test, feature = "_test_utils"))]
1284 pub(crate) announcement_sigs_state: AnnouncementSigsState,
1285 #[cfg(not(any(test, feature = "_test_utils")))]
1286 announcement_sigs_state: AnnouncementSigsState,
1288 secp_ctx: Secp256k1<secp256k1::All>,
1289 channel_value_satoshis: u64,
1291 latest_monitor_update_id: u64,
1293 holder_signer: ChannelSignerType<SP>,
1294 shutdown_scriptpubkey: Option<ShutdownScript>,
1295 destination_script: ScriptBuf,
1297 // Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
1298 // generation start at 0 and count up...this simplifies some parts of implementation at the
1299 // cost of others, but should really just be changed.
1301 cur_holder_commitment_transaction_number: u64,
1302 cur_counterparty_commitment_transaction_number: u64,
1303 value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
1304 pending_inbound_htlcs: Vec<InboundHTLCOutput>,
1305 pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
1306 holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
1308 /// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
1309 /// need to ensure we resend them in the order we originally generated them. Note that because
1310 /// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
1311 /// sufficient to simply set this to the opposite of any message we are generating as we
1312 /// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
1313 /// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
1315 resend_order: RAACommitmentOrder,
1317 monitor_pending_channel_ready: bool,
1318 monitor_pending_revoke_and_ack: bool,
1319 monitor_pending_commitment_signed: bool,
1321 // TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
1322 // responsible for some of the HTLCs here or not - we don't know whether the update in question
1323 // completed or not. We currently ignore these fields entirely when force-closing a channel,
1324 // but need to handle this somehow or we run the risk of losing HTLCs!
1325 monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
1326 monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
1327 monitor_pending_finalized_fulfills: Vec<HTLCSource>,
1328 monitor_pending_update_adds: Vec<msgs::UpdateAddHTLC>,
1330 /// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
1331 /// but our signer (initially) refused to give us a signature, we should retry at some point in
1332 /// the future when the signer indicates it may have a signature for us.
1334 /// This flag is set in such a case. Note that we don't need to persist this as we'll end up
1335 /// setting it again as a side-effect of [`Channel::channel_reestablish`].
1336 signer_pending_commitment_update: bool,
1337 /// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
1338 /// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
1339 /// outbound or inbound.
1340 signer_pending_funding: bool,
1342 // pending_update_fee is filled when sending and receiving update_fee.
1344 // Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
1345 // or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
1346 // generating new commitment transactions with exactly the same criteria as inbound/outbound
1347 // HTLCs with similar state.
1348 pending_update_fee: Option<(u32, FeeUpdateState)>,
1349 // If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
1350 // it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
1351 // `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
1352 // `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
1353 // further `send_update_fee` calls, dropping the previous holding cell update entirely.
1354 holding_cell_update_fee: Option<u32>,
1355 next_holder_htlc_id: u64,
1356 next_counterparty_htlc_id: u64,
1357 feerate_per_kw: u32,
1359 /// The timestamp set on our latest `channel_update` message for this channel. It is updated
1360 /// when the channel is updated in ways which may impact the `channel_update` message or when a
1361 /// new block is received, ensuring it's always at least moderately close to the current real
1363 update_time_counter: u32,
1365 #[cfg(debug_assertions)]
1366 /// Max to_local and to_remote outputs in a locally-generated commitment transaction
1367 holder_max_commitment_tx_output: Mutex<(u64, u64)>,
1368 #[cfg(debug_assertions)]
1369 /// Max to_local and to_remote outputs in a remote-generated commitment transaction
1370 counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,
1372 last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
1373 target_closing_feerate_sats_per_kw: Option<u32>,
1375 /// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
1376 /// update, we need to delay processing it until later. We do that here by simply storing the
1377 /// closing_signed message and handling it in `maybe_propose_closing_signed`.
1378 pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,
1380 /// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
1381 /// transaction. These are set once we reach `closing_negotiation_ready`.
1383 pub(crate) closing_fee_limits: Option<(u64, u64)>,
1385 closing_fee_limits: Option<(u64, u64)>,
1387 /// If we remove an HTLC (or fee update), commit, and receive our counterparty's
1388 /// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
1389 /// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
1390 /// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
1391 /// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
1393 /// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
1394 /// until we see a `commitment_signed` before doing so.
1396 /// We don't bother to persist this - we anticipate this state won't last longer than a few
1397 /// milliseconds, so any accidental force-closes here should be exceedingly rare.
1398 expecting_peer_commitment_signed: bool,
1400 /// The hash of the block in which the funding transaction was included.
1401 funding_tx_confirmed_in: Option<BlockHash>,
1402 funding_tx_confirmation_height: u32,
1403 short_channel_id: Option<u64>,
1404 /// Either the height at which this channel was created or the height at which it was last
1405 /// serialized if it was serialized by versions prior to 0.0.103.
1406 /// We use this to close if funding is never broadcasted.
1407 pub(super) channel_creation_height: u32,
1409 counterparty_dust_limit_satoshis: u64,
1412 pub(super) holder_dust_limit_satoshis: u64,
1414 holder_dust_limit_satoshis: u64,
1417 pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
1419 counterparty_max_htlc_value_in_flight_msat: u64,
1422 pub(super) holder_max_htlc_value_in_flight_msat: u64,
1424 holder_max_htlc_value_in_flight_msat: u64,
1426 /// minimum channel reserve for self to maintain - set by them.
1427 counterparty_selected_channel_reserve_satoshis: Option<u64>,
1430 pub(super) holder_selected_channel_reserve_satoshis: u64,
1432 holder_selected_channel_reserve_satoshis: u64,
1434 counterparty_htlc_minimum_msat: u64,
1435 holder_htlc_minimum_msat: u64,
1437 pub counterparty_max_accepted_htlcs: u16,
1439 counterparty_max_accepted_htlcs: u16,
1440 holder_max_accepted_htlcs: u16,
1441 minimum_depth: Option<u32>,
1443 counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
1445 pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
1446 funding_transaction: Option<Transaction>,
1447 is_batch_funding: Option<()>,
1449 counterparty_cur_commitment_point: Option<PublicKey>,
1450 counterparty_prev_commitment_point: Option<PublicKey>,
1451 counterparty_node_id: PublicKey,
1453 counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,
1455 commitment_secrets: CounterpartyCommitmentSecrets,
1457 channel_update_status: ChannelUpdateStatus,
1458 /// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
1459 /// not complete within a single timer tick (one minute), we should force-close the channel.
1460 /// This prevents us from keeping unusable channels around forever if our counterparty wishes
1462 /// Note that this field is reset to false on deserialization to give us a chance to connect to
1463 /// our peer and start the closing_signed negotiation fresh.
1464 closing_signed_in_flight: bool,
1466 /// Our counterparty's channel_announcement signatures provided in announcement_signatures.
1467 /// This can be used to rebroadcast the channel_announcement message later.
1468 announcement_sigs: Option<(Signature, Signature)>,
1470 // We save these values so we can make sure `next_local_commit_tx_fee_msat` and
1471 // `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
1472 // be, by comparing the cached values to the fee of the transaction generated by
1473 // `build_commitment_transaction`.
1474 #[cfg(any(test, fuzzing))]
1475 next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
1476 #[cfg(any(test, fuzzing))]
1477 next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
1479 /// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
1480 /// they will not send a channel_reestablish until the channel locks in. Then, they will send a
1481 /// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
1482 /// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
1483 /// message until we receive a channel_reestablish.
1485 /// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
1486 pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,
1488 /// An option set when we wish to track how many ticks have elapsed while waiting for a response
1489 /// from our counterparty after sending a message. If the peer has yet to respond after reaching
1490 /// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
1491 /// unblock the state machine.
1493 /// This behavior is mostly motivated by a lnd bug in which we don't receive a message we expect
1494 /// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
1495 /// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
1497 /// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
1498 /// [`msgs::RevokeAndACK`] message from the counterparty.
1499 sent_message_awaiting_response: Option<usize>,
1501 #[cfg(any(test, fuzzing))]
1502 // When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
1503 // corresponding HTLC on the inbound path. If, then, the outbound path channel is
1504 // disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
1505 // messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
1506 // is fine, but as a sanity check in our failure to generate the second claim, we check here
1507 // that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
1508 historical_inbound_htlc_fulfills: HashSet<u64>,
1510 /// This channel's type, as negotiated during channel open
1511 channel_type: ChannelTypeFeatures,
1513 // Our counterparty can offer us SCID aliases which they will map to this channel when routing
1514 // outbound payments. These can be used in invoice route hints to avoid explicitly revealing
1515 // the channel's funding UTXO.
1517 // We also use this when sending our peer a channel_update that isn't to be broadcasted
1518 // publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
1519 // associated channel mapping.
1521 // We only bother storing the most recent SCID alias at any time, though our counterparty has
1522 // to store all of them.
1523 latest_inbound_scid_alias: Option<u64>,
1525 // We always offer our counterparty a static SCID alias, which we recognize as for this channel
1526 // if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
1527 // don't currently support node id aliases and eventually privacy should be provided with
1528 // blinded paths instead of simple scid+node_id aliases.
1529 outbound_scid_alias: u64,
1531 // We track whether we already emitted a `ChannelPending` event.
1532 channel_pending_event_emitted: bool,
1534 // We track whether we already emitted a `ChannelReady` event.
1535 channel_ready_event_emitted: bool,
1537 /// Some if we initiated to shut down the channel.
1538 local_initiated_shutdown: Option<()>,
1540 /// The unique identifier used to re-derive the private key material for the channel through
1541 /// [`SignerProvider::derive_channel_signer`].
1543 channel_keys_id: [u8; 32],
1545 pub channel_keys_id: [u8; 32],
1547 /// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
1548 /// store it here and only release it to the `ChannelManager` once it asks for it.
1549 blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
1552 impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
1553 fn new_for_inbound_channel<'a, ES: Deref, F: Deref, L: Deref>(
1554 fee_estimator: &'a LowerBoundedFeeEstimator<F>,
1555 entropy_source: &'a ES,
1556 signer_provider: &'a SP,
1557 counterparty_node_id: PublicKey,
1558 their_features: &'a InitFeatures,
1560 config: &'a UserConfig,
1561 current_chain_height: u32,
1564 our_funding_satoshis: u64,
1565 counterparty_pubkeys: ChannelPublicKeys,
1566 channel_type: ChannelTypeFeatures,
1567 holder_selected_channel_reserve_satoshis: u64,
1568 msg_channel_reserve_satoshis: u64,
1570 open_channel_fields: msgs::CommonOpenChannelFields,
1571 ) -> Result<ChannelContext<SP>, ChannelError>
1573 ES::Target: EntropySource,
1574 F::Target: FeeEstimator,
1576 SP::Target: SignerProvider,
1578 let logger = WithContext::from(logger, Some(counterparty_node_id), Some(open_channel_fields.temporary_channel_id), None);
1579 let announced_channel = if (open_channel_fields.channel_flags & 1) == 1 { true } else { false };
1581 let channel_value_satoshis = our_funding_satoshis.saturating_add(open_channel_fields.funding_satoshis);
1583 let channel_keys_id = signer_provider.generate_channel_keys_id(true, channel_value_satoshis, user_id);
1584 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
1585 let pubkeys = holder_signer.pubkeys().clone();
1587 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
1588 return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
1591 // Check sanity of message fields:
1592 if channel_value_satoshis > config.channel_handshake_limits.max_funding_satoshis {
1593 return Err(ChannelError::Close(format!(
1594 "Per our config, funding must be at most {}. It was {}. Peer contribution: {}. Our contribution: {}",
1595 config.channel_handshake_limits.max_funding_satoshis, channel_value_satoshis,
1596 open_channel_fields.funding_satoshis, our_funding_satoshis)));
1598 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
1599 return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", channel_value_satoshis)));
1601 if msg_channel_reserve_satoshis > channel_value_satoshis {
1602 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be no greater than channel_value_satoshis: {}", msg_channel_reserve_satoshis, channel_value_satoshis)));
1604 let full_channel_value_msat = (channel_value_satoshis - msg_channel_reserve_satoshis) * 1000;
1605 if msg_push_msat > full_channel_value_msat {
1606 return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg_push_msat, full_channel_value_msat)));
1608 if open_channel_fields.dust_limit_satoshis > channel_value_satoshis {
1609 return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than channel_value_satoshis {}. Peer never wants payout outputs?", open_channel_fields.dust_limit_satoshis, channel_value_satoshis)));
1611 if open_channel_fields.htlc_minimum_msat >= full_channel_value_msat {
1612 return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", open_channel_fields.htlc_minimum_msat, full_channel_value_msat)));
1614 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, open_channel_fields.commitment_feerate_sat_per_1000_weight, None, &&logger)?;
1616 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
1617 if open_channel_fields.to_self_delay > max_counterparty_selected_contest_delay {
1618 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, open_channel_fields.to_self_delay)));
1620 if open_channel_fields.max_accepted_htlcs < 1 {
1621 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
1623 if open_channel_fields.max_accepted_htlcs > MAX_HTLCS {
1624 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", open_channel_fields.max_accepted_htlcs, MAX_HTLCS)));
1627 // Now check against optional parameters as set by config...
1628 if channel_value_satoshis < config.channel_handshake_limits.min_funding_satoshis {
1629 return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", channel_value_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
1631 if open_channel_fields.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
1632 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", open_channel_fields.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
1634 if open_channel_fields.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
1635 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", open_channel_fields.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
1637 if msg_channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
1638 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg_channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
1640 if open_channel_fields.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
1641 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", open_channel_fields.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
1643 if open_channel_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
1644 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
1646 if open_channel_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
1647 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
1650 // Convert things into internal flags and prep our state:
1652 if config.channel_handshake_limits.force_announced_channel_preference {
1653 if config.channel_handshake_config.announced_channel != announced_channel {
1654 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
1658 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
1659 // Protocol level safety check in place, although it should never happen because
1660 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
1661 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
1663 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
1664 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg_push_msat)));
1666 if msg_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
1667 log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
1668 msg_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
1670 if holder_selected_channel_reserve_satoshis < open_channel_fields.dust_limit_satoshis {
1671 return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", open_channel_fields.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
1674 // check if the funder's amount for the initial commitment tx is sufficient
1675 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
1676 let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
1677 ANCHOR_OUTPUT_VALUE_SATOSHI * 2
1681 let funders_amount_msat = open_channel_fields.funding_satoshis * 1000 - msg_push_msat;
1682 let commitment_tx_fee = commit_tx_fee_msat(open_channel_fields.commitment_feerate_sat_per_1000_weight, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
1683 if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
1684 return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
1687 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
1688 // While it's reasonable for us to not meet the channel reserve initially (if they don't
1689 // want to push much to us), our counterparty should always have more than our reserve.
1690 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
1691 return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
1694 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
1695 match &open_channel_fields.shutdown_scriptpubkey {
1696 &Some(ref script) => {
1697 // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
1698 if script.len() == 0 {
1701 if !script::is_bolt2_compliant(&script, their_features) {
1702 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
1704 Some(script.clone())
1707 // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
1709 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
1714 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
1715 match signer_provider.get_shutdown_scriptpubkey() {
1716 Ok(scriptpubkey) => Some(scriptpubkey),
1717 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
1721 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
1722 if !shutdown_scriptpubkey.is_compatible(&their_features) {
1723 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
1727 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
1728 Ok(script) => script,
1729 Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
1732 let mut secp_ctx = Secp256k1::new();
1733 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
1735 let minimum_depth = if is_0conf {
1738 Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
1741 let value_to_self_msat = our_funding_satoshis * 1000 + msg_push_msat;
1743 // TODO(dual_funding): Checks for `funding_feerate_sat_per_1000_weight`?
1745 let channel_context = ChannelContext {
1748 config: LegacyChannelConfig {
1749 options: config.channel_config.clone(),
1751 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
1756 inbound_handshake_limits_override: None,
1758 temporary_channel_id: Some(open_channel_fields.temporary_channel_id),
1759 channel_id: open_channel_fields.temporary_channel_id,
1760 channel_state: ChannelState::NegotiatingFunding(
1761 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
1763 announcement_sigs_state: AnnouncementSigsState::NotSent,
1766 latest_monitor_update_id: 0,
1768 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
1769 shutdown_scriptpubkey,
1772 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
1773 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
1776 pending_inbound_htlcs: Vec::new(),
1777 pending_outbound_htlcs: Vec::new(),
1778 holding_cell_htlc_updates: Vec::new(),
1779 pending_update_fee: None,
1780 holding_cell_update_fee: None,
1781 next_holder_htlc_id: 0,
1782 next_counterparty_htlc_id: 0,
1783 update_time_counter: 1,
1785 resend_order: RAACommitmentOrder::CommitmentFirst,
1787 monitor_pending_channel_ready: false,
1788 monitor_pending_revoke_and_ack: false,
1789 monitor_pending_commitment_signed: false,
1790 monitor_pending_forwards: Vec::new(),
1791 monitor_pending_failures: Vec::new(),
1792 monitor_pending_finalized_fulfills: Vec::new(),
1793 monitor_pending_update_adds: Vec::new(),
1795 signer_pending_commitment_update: false,
1796 signer_pending_funding: false,
1799 #[cfg(debug_assertions)]
1800 holder_max_commitment_tx_output: Mutex::new((value_to_self_msat, (channel_value_satoshis * 1000 - msg_push_msat).saturating_sub(value_to_self_msat))),
1801 #[cfg(debug_assertions)]
1802 counterparty_max_commitment_tx_output: Mutex::new((value_to_self_msat, (channel_value_satoshis * 1000 - msg_push_msat).saturating_sub(value_to_self_msat))),
1804 last_sent_closing_fee: None,
1805 pending_counterparty_closing_signed: None,
1806 expecting_peer_commitment_signed: false,
1807 closing_fee_limits: None,
1808 target_closing_feerate_sats_per_kw: None,
1810 funding_tx_confirmed_in: None,
1811 funding_tx_confirmation_height: 0,
1812 short_channel_id: None,
1813 channel_creation_height: current_chain_height,
1815 feerate_per_kw: open_channel_fields.commitment_feerate_sat_per_1000_weight,
1816 channel_value_satoshis,
1817 counterparty_dust_limit_satoshis: open_channel_fields.dust_limit_satoshis,
1818 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
1819 counterparty_max_htlc_value_in_flight_msat: cmp::min(open_channel_fields.max_htlc_value_in_flight_msat, channel_value_satoshis * 1000),
1820 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
1821 counterparty_selected_channel_reserve_satoshis: Some(msg_channel_reserve_satoshis),
1822 holder_selected_channel_reserve_satoshis,
1823 counterparty_htlc_minimum_msat: open_channel_fields.htlc_minimum_msat,
1824 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
1825 counterparty_max_accepted_htlcs: open_channel_fields.max_accepted_htlcs,
1826 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
1829 counterparty_forwarding_info: None,
1831 channel_transaction_parameters: ChannelTransactionParameters {
1832 holder_pubkeys: pubkeys,
1833 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
1834 is_outbound_from_holder: false,
1835 counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
1836 selected_contest_delay: open_channel_fields.to_self_delay,
1837 pubkeys: counterparty_pubkeys,
1839 funding_outpoint: None,
1840 channel_type_features: channel_type.clone()
1842 funding_transaction: None,
1843 is_batch_funding: None,
1845 counterparty_cur_commitment_point: Some(open_channel_fields.first_per_commitment_point),
1846 counterparty_prev_commitment_point: None,
1847 counterparty_node_id,
1849 counterparty_shutdown_scriptpubkey,
1851 commitment_secrets: CounterpartyCommitmentSecrets::new(),
1853 channel_update_status: ChannelUpdateStatus::Enabled,
1854 closing_signed_in_flight: false,
1856 announcement_sigs: None,
1858 #[cfg(any(test, fuzzing))]
1859 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
1860 #[cfg(any(test, fuzzing))]
1861 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
1863 workaround_lnd_bug_4006: None,
1864 sent_message_awaiting_response: None,
1866 latest_inbound_scid_alias: None,
1867 outbound_scid_alias: 0,
1869 channel_pending_event_emitted: false,
1870 channel_ready_event_emitted: false,
1872 #[cfg(any(test, fuzzing))]
1873 historical_inbound_htlc_fulfills: new_hash_set(),
1878 local_initiated_shutdown: None,
1880 blocked_monitor_updates: Vec::new(),
/// Builds a new `ChannelContext` for an outbound channel, i.e. one we are opening
/// with `funding_satoshis` of our own funds.
///
/// Validates our own parameters (funding size vs. wumbo support, push amount,
/// contest delay), returning `APIError` variants on misuse, then assembles the
/// initial context with `is_outbound_from_holder: true` and counterparty-supplied
/// fields zeroed/`None` until `accept_channel` arrives.
///
/// NOTE(review): this excerpt elides interleaved lines (the `where` keyword, `else`
/// arms, closing braces, and the `ChannelContext {` struct-literal opener), so
/// `push_msat` and a few other locals are declared in elided text — confirm against
/// the full file.
1886 fn new_for_outbound_channel<'a, ES: Deref, F: Deref>(
1887 fee_estimator: &'a LowerBoundedFeeEstimator<F>,
1888 entropy_source: &'a ES,
1889 signer_provider: &'a SP,
1890 counterparty_node_id: PublicKey,
1891 their_features: &'a InitFeatures,
1892 funding_satoshis: u64,
1895 config: &'a UserConfig,
1896 current_chain_height: u32,
1897 outbound_scid_alias: u64,
1898 temporary_channel_id: Option<ChannelId>,
1899 holder_selected_channel_reserve_satoshis: u64,
1900 channel_keys_id: [u8; 32],
1901 holder_signer: <SP::Target as SignerProvider>::EcdsaSigner,
1902 pubkeys: ChannelPublicKeys,
1903 ) -> Result<ChannelContext<SP>, APIError>
1905 ES::Target: EntropySource,
1906 F::Target: FeeEstimator,
1907 SP::Target: SignerProvider,
1909 // This will be updated with the counterparty contribution if this is a dual-funded channel
1910 let channel_value_satoshis = funding_satoshis;
1912 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
1914 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
1915 return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
1917 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
1918 return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
1920 let channel_value_msat = channel_value_satoshis * 1000;
1921 if push_msat > channel_value_msat {
1922 return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
1924 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
1925 return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
// Negotiate the channel type from our config and the peer's advertised features.
1928 let channel_type = get_initial_channel_type(&config, their_features);
1929 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
// Anchor channels reserve value for two anchor outputs and use a dedicated fee target.
1931 let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
1932 (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
1934 (ConfirmationTarget::NonAnchorChannelFee, 0)
1936 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
// As the funder we pay the commitment fee: ensure our balance can cover it.
1938 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
1939 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
1940 if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
1941 return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
1944 let mut secp_ctx = Secp256k1::new();
1945 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
// Our upfront shutdown script, if the user opted into committing one.
1947 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
1948 match signer_provider.get_shutdown_scriptpubkey() {
1949 Ok(scriptpubkey) => Some(scriptpubkey),
1950 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
1954 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
1955 if !shutdown_scriptpubkey.is_compatible(&their_features) {
1956 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
1960 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
1961 Ok(script) => script,
1962 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
// Use the caller-provided temporary id, or derive a fresh random one.
1965 let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
1970 config: LegacyChannelConfig {
1971 options: config.channel_config.clone(),
1972 announced_channel: config.channel_handshake_config.announced_channel,
1973 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
1978 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
1980 channel_id: temporary_channel_id,
1981 temporary_channel_id: Some(temporary_channel_id),
// Outbound: only our init has been sent at construction time.
1982 channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
1983 announcement_sigs_state: AnnouncementSigsState::NotSent,
1985 // We'll add our counterparty's `funding_satoshis` when we receive `accept_channel2`.
1986 channel_value_satoshis,
1988 latest_monitor_update_id: 0,
1990 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
1991 shutdown_scriptpubkey,
1994 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
1995 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
1998 pending_inbound_htlcs: Vec::new(),
1999 pending_outbound_htlcs: Vec::new(),
2000 holding_cell_htlc_updates: Vec::new(),
2001 pending_update_fee: None,
2002 holding_cell_update_fee: None,
2003 next_holder_htlc_id: 0,
2004 next_counterparty_htlc_id: 0,
2005 update_time_counter: 1,
2007 resend_order: RAACommitmentOrder::CommitmentFirst,
2009 monitor_pending_channel_ready: false,
2010 monitor_pending_revoke_and_ack: false,
2011 monitor_pending_commitment_signed: false,
2012 monitor_pending_forwards: Vec::new(),
2013 monitor_pending_failures: Vec::new(),
2014 monitor_pending_finalized_fulfills: Vec::new(),
2015 monitor_pending_update_adds: Vec::new(),
2017 signer_pending_commitment_update: false,
2018 signer_pending_funding: false,
2020 // We'll add our counterparty's `funding_satoshis` to these max commitment output assertions
2021 // when we receive `accept_channel2`.
2022 #[cfg(debug_assertions)]
2023 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
2024 #[cfg(debug_assertions)]
2025 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
2027 last_sent_closing_fee: None,
2028 pending_counterparty_closing_signed: None,
2029 expecting_peer_commitment_signed: false,
2030 closing_fee_limits: None,
2031 target_closing_feerate_sats_per_kw: None,
2033 funding_tx_confirmed_in: None,
2034 funding_tx_confirmation_height: 0,
2035 short_channel_id: None,
2036 channel_creation_height: current_chain_height,
2038 feerate_per_kw: commitment_feerate,
// Counterparty limits below are placeholders until their accept_channel arrives.
2039 counterparty_dust_limit_satoshis: 0,
2040 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
2041 counterparty_max_htlc_value_in_flight_msat: 0,
2042 // We'll adjust this to include our counterparty's `funding_satoshis` when we
2043 // receive `accept_channel2`.
2044 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
2045 counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
2046 holder_selected_channel_reserve_satoshis,
2047 counterparty_htlc_minimum_msat: 0,
// A zero configured htlc_minimum would allow 0-value HTLCs; bump it to 1 msat.
2048 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
2049 counterparty_max_accepted_htlcs: 0,
2050 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
2051 minimum_depth: None, // Filled in in accept_channel
2053 counterparty_forwarding_info: None,
2055 channel_transaction_parameters: ChannelTransactionParameters {
2056 holder_pubkeys: pubkeys,
2057 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
2058 is_outbound_from_holder: true,
2059 counterparty_parameters: None,
2060 funding_outpoint: None,
2061 channel_type_features: channel_type.clone()
2063 funding_transaction: None,
2064 is_batch_funding: None,
2066 counterparty_cur_commitment_point: None,
2067 counterparty_prev_commitment_point: None,
2068 counterparty_node_id,
2070 counterparty_shutdown_scriptpubkey: None,
2072 commitment_secrets: CounterpartyCommitmentSecrets::new(),
2074 channel_update_status: ChannelUpdateStatus::Enabled,
2075 closing_signed_in_flight: false,
2077 announcement_sigs: None,
2079 #[cfg(any(test, fuzzing))]
2080 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
2081 #[cfg(any(test, fuzzing))]
2082 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
2084 workaround_lnd_bug_4006: None,
2085 sent_message_awaiting_response: None,
2087 latest_inbound_scid_alias: None,
2088 outbound_scid_alias,
2090 channel_pending_event_emitted: false,
2091 channel_ready_event_emitted: false,
2093 #[cfg(any(test, fuzzing))]
2094 historical_inbound_htlc_fulfills: new_hash_set(),
2099 blocked_monitor_updates: Vec::new(),
2100 local_initiated_shutdown: None,
2104 /// Allowed in any state (including after shutdown)
2105 pub fn get_update_time_counter(&self) -> u32 {
2106 self.update_time_counter
2109 pub fn get_latest_monitor_update_id(&self) -> u64 {
2110 self.latest_monitor_update_id
2113 pub fn should_announce(&self) -> bool {
2114 self.config.announced_channel
2117 pub fn is_outbound(&self) -> bool {
2118 self.channel_transaction_parameters.is_outbound_from_holder
2121 /// Gets the fee we'd want to charge for adding an HTLC output to this Channel
2122 /// Allowed in any state (including after shutdown)
2123 pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
2124 self.config.options.forwarding_fee_base_msat
2127 /// Returns true if we've ever received a message from the remote end for this Channel
2128 pub fn have_received_message(&self) -> bool {
2129 self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
2132 /// Returns true if this channel is fully established and not known to be closing.
2133 /// Allowed in any state (including after shutdown)
2134 pub fn is_usable(&self) -> bool {
2135 matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
2136 !self.channel_state.is_local_shutdown_sent() &&
2137 !self.channel_state.is_remote_shutdown_sent() &&
2138 !self.monitor_pending_channel_ready
/// shutdown state returns the state of the channel in its various stages of shutdown
pub fn shutdown_state(&self) -> ChannelShutdownState {
match self.channel_state {
ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
// Only our `shutdown` has been sent: we initiated but the peer hasn't echoed yet.
if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
ChannelShutdownState::ShutdownInitiated
} else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
// `shutdown` sent in at least one direction, but pending HTLCs/fee updates
// still block the start of fee negotiation (see `closing_negotiation_ready`).
ChannelShutdownState::ResolvingHTLCs
} else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
// Channel is quiescent; we're exchanging closing_signed fee proposals.
ChannelShutdownState::NegotiatingClosingFee
ChannelShutdownState::NotShuttingDown
ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
// Pre-funded states are never "shutting down" in the cooperative sense.
_ => ChannelShutdownState::NotShuttingDown,
/// Returns true once the channel is quiescent enough for closing-fee negotiation:
/// both shutdown flags are set and no HTLCs or fee update remain pending.
fn closing_negotiation_ready(&self) -> bool {
let is_ready_to_close = match self.channel_state {
ChannelState::AwaitingChannelReady(flags) =>
// Mask down to the funded-state flag set before comparing: exactly the two
// shutdown flags (and nothing else in that set) must be present.
flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
ChannelState::ChannelReady(flags) =>
flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
// No in-flight HTLCs in either direction and no pending fee update.
self.pending_inbound_htlcs.is_empty() &&
self.pending_outbound_htlcs.is_empty() &&
self.pending_update_fee.is_none() &&
2173 /// Returns true if this channel is currently available for use. This is a superset of
2174 /// is_usable() and considers things like the channel being temporarily disabled.
2175 /// Allowed in any state (including after shutdown)
2176 pub fn is_live(&self) -> bool {
2177 self.is_usable() && !self.channel_state.is_peer_disconnected()
// Public utilities:

pub fn channel_id(&self) -> ChannelId {
// Return the `temporary_channel_id` used during channel establishment.
// Will return `None` for channels created prior to LDK version 0.0.115.
pub fn temporary_channel_id(&self) -> Option<ChannelId> {
self.temporary_channel_id
// Minimum confirmation depth required for the funding transaction, if set
// (the field is initialized to `None` and filled in during `accept_channel`).
pub fn minimum_depth(&self) -> Option<u32> {
/// Gets the "user_id" value passed into the construction of this channel. It has no special
/// meaning and exists only to allow users to have a persistent identifier of a channel.
pub fn get_user_id(&self) -> u128 {
/// Gets the channel's type
pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
/// Gets the channel's `short_channel_id`.
///
/// Will return `None` if the channel hasn't been confirmed yet.
pub fn get_short_channel_id(&self) -> Option<u64> {
self.short_channel_id
2215 /// Allowed in any state (including after shutdown)
2216 pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
2217 self.latest_inbound_scid_alias
2220 /// Allowed in any state (including after shutdown)
2221 pub fn outbound_scid_alias(&self) -> u64 {
2222 self.outbound_scid_alias
2225 /// Returns the holder signer for this channel.
2227 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
2228 return &self.holder_signer
2231 /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
2232 /// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases
2233 /// or prior to any channel actions during `Channel` initialization.
2234 pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
2235 debug_assert_eq!(self.outbound_scid_alias, 0);
2236 self.outbound_scid_alias = outbound_scid_alias;
2239 /// Returns the funding_txo we either got from our peer, or were given by
2240 /// get_funding_created.
2241 pub fn get_funding_txo(&self) -> Option<OutPoint> {
2242 self.channel_transaction_parameters.funding_outpoint
/// Returns the height in which our funding transaction was confirmed.
pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
let conf_height = self.funding_tx_confirmation_height;
// Height 0 is the sentinel for "not confirmed yet (or reorged out)".
if conf_height > 0 {
/// Returns the block hash in which our funding transaction was confirmed.
pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
self.funding_tx_confirmed_in
/// Returns the current number of confirmations on the funding transaction.
pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
if self.funding_tx_confirmation_height == 0 {
// We either haven't seen any confirmation yet, or observed a reorg.
// +1 because a transaction confirmed exactly at `height` has one confirmation.
height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
2270 fn get_holder_selected_contest_delay(&self) -> u16 {
2271 self.channel_transaction_parameters.holder_selected_contest_delay
2274 fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
2275 &self.channel_transaction_parameters.holder_pubkeys
2278 pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
2279 self.channel_transaction_parameters.counterparty_parameters
2280 .as_ref().map(|params| params.selected_contest_delay)
2283 fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
2284 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
2287 /// Allowed in any state (including after shutdown)
2288 pub fn get_counterparty_node_id(&self) -> PublicKey {
2289 self.counterparty_node_id
2292 /// Allowed in any state (including after shutdown)
2293 pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
2294 self.holder_htlc_minimum_msat
2297 /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
2298 pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
2299 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
/// Allowed in any state (including after shutdown)
pub fn get_announced_htlc_max_msat(&self) -> u64 {
// Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
// to use full capacity. This is an effort to reduce routing failures, because in many cases
// channel might have been used to route very small values (either by honest users or as DoS).
self.channel_value_satoshis * 1000 * 9 / 10,
// ...but never more than the counterparty will allow us to have in flight.
self.counterparty_max_htlc_value_in_flight_msat
2314 /// Allowed in any state (including after shutdown)
2315 pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
2316 self.counterparty_htlc_minimum_msat
2319 /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
2320 pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
2321 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
/// Computes the advertisable HTLC maximum for one side: the channel's spendable
/// capacity (total value minus both channel reserves), bounded by that side's
/// max-in-flight limit. Returns `None` until the counterparty's reserve is known.
fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
let holder_reserve = self.holder_selected_channel_reserve_satoshis;
// Capacity net of both reserves, converted to msat...
(self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
// ...combined with the party's max-in-flight limit.
party_max_htlc_value_in_flight_msat
2334 pub fn get_value_satoshis(&self) -> u64 {
2335 self.channel_value_satoshis
2338 pub fn get_fee_proportional_millionths(&self) -> u32 {
2339 self.config.options.forwarding_fee_proportional_millionths
2342 pub fn get_cltv_expiry_delta(&self) -> u16 {
2343 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
2346 fn get_dust_exposure_limiting_feerate<F: Deref>(&self,
2347 fee_estimator: &LowerBoundedFeeEstimator<F>,
2348 ) -> u32 where F::Target: FeeEstimator {
2349 fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::OnChainSweep)
2352 pub fn get_max_dust_htlc_exposure_msat(&self, limiting_feerate_sat_per_kw: u32) -> u64 {
2353 match self.config.options.max_dust_htlc_exposure {
2354 MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
2355 (limiting_feerate_sat_per_kw as u64).saturating_mul(multiplier)
2357 MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
2361 /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
2362 pub fn prev_config(&self) -> Option<ChannelConfig> {
2363 self.prev_config.map(|prev_config| prev_config.0)
2366 // Checks whether we should emit a `ChannelPending` event.
2367 pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
2368 self.is_funding_broadcast() && !self.channel_pending_event_emitted
2371 // Returns whether we already emitted a `ChannelPending` event.
2372 pub(crate) fn channel_pending_event_emitted(&self) -> bool {
2373 self.channel_pending_event_emitted
2376 // Remembers that we already emitted a `ChannelPending` event.
2377 pub(crate) fn set_channel_pending_event_emitted(&mut self) {
2378 self.channel_pending_event_emitted = true;
2381 // Checks whether we should emit a `ChannelReady` event.
2382 pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
2383 self.is_usable() && !self.channel_ready_event_emitted
2386 // Remembers that we already emitted a `ChannelReady` event.
2387 pub(crate) fn set_channel_ready_event_emitted(&mut self) {
2388 self.channel_ready_event_emitted = true;
/// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
/// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
/// no longer be considered when forwarding HTLCs.
pub fn maybe_expire_prev_config(&mut self) {
// Nothing to do if no previous config is being retained.
if self.prev_config.is_none() {
// `prev_config.1` is the tick counter; it starts at 0 when `update_config`
// stores `(old_options, 0)`.
let prev_config = self.prev_config.as_mut().unwrap();
if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
self.prev_config = None;
/// Returns the current [`ChannelConfig`] applied to the channel.
pub fn config(&self) -> ChannelConfig {
/// Updates the channel's config. A bool is returned indicating whether the config update
/// applied resulted in a new ChannelUpdate message.
pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
// Only the forwarding-relevant knobs require us to broadcast a fresh
// `channel_update`; other options may change silently.
let did_channel_update =
self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
if did_channel_update {
// Keep the old options around with a fresh tick counter — presumably so HTLCs
// forwarded under the old policy are still honored while the update
// propagates; see `maybe_expire_prev_config` (confirm).
self.prev_config = Some((self.config.options, 0));
// Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
// policy change to propagate throughout the network.
self.update_time_counter += 1;
self.config.options = *config;
2427 /// Returns true if funding_signed was sent/received and the
2428 /// funding transaction has been broadcast if necessary.
2429 pub fn is_funding_broadcast(&self) -> bool {
2430 !self.channel_state.is_pre_funded_state() &&
2431 !matches!(self.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH))
/// Transaction nomenclature is somewhat confusing here as there are many different cases - a
/// transaction is referred to as "a's transaction" implying that a will be able to broadcast
/// the transaction. Thus, b will generally be sending a signature over such a transaction to
/// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
/// such, a transaction is generally the result of b increasing the amount paid to a (or adding
/// @local is used only to convert relevant internal structures which refer to remote vs local
/// to decide value of outputs and direction of HTLCs.
/// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
/// state may indicate that one peer has informed the other that they'd like to add an HTLC but
/// have not yet committed it. Such HTLCs will only be included in transactions which are being
/// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
/// which peer generated this transaction and "to whom" this transaction flows.
fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
where L::Target: Logger
// HTLCs whose output would fall below the broadcaster's dust limit get no output
// on the commitment tx; they are tracked separately here.
let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
// Dust is always judged against the *broadcaster's* dust limit.
let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
let mut remote_htlc_total_msat = 0;
let mut local_htlc_total_msat = 0;
// Signed msat adjustment to our balance from HTLC removals included below.
let mut value_to_self_msat_offset = 0;
// A pending fee update takes effect on this transaction only if it would be
// committed here; the criteria mirror the HTLC-inclusion rules below.
let mut feerate_per_kw = self.feerate_per_kw;
if let Some((feerate, update_state)) = self.pending_update_fee {
if match update_state {
// Note that these match the inclusion criteria when scanning
// pending_inbound_htlcs below.
FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
feerate_per_kw = feerate;
log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
// Builds the in-commitment representation of an HTLC ($offered is true when the
// broadcaster is the offerer). The output index is assigned later.
macro_rules! get_htlc_in_commitment {
($htlc: expr, $offered: expr) => {
HTLCOutputInCommitment {
amount_msat: $htlc.amount_msat,
cltv_expiry: $htlc.cltv_expiry,
payment_hash: $htlc.payment_hash,
transaction_output_index: None
// Classifies an HTLC as dust vs non-dust for this commitment. For non-anchor
// channels the second-stage (timeout/success) tx fee is added to the dust limit.
macro_rules! add_htlc_output {
($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
if $outbound == local { // "offered HTLC output"
let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
included_non_dust_htlcs.push((htlc_in_tx, $source));
log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
included_dust_htlcs.push((htlc_in_tx, $source));
let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
included_non_dust_htlcs.push((htlc_in_tx, $source));
log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
included_dust_htlcs.push((htlc_in_tx, $source));
let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
// Inbound (offered-to-us) HTLCs; inclusion follows the same announce/revoke
// rules as the fee-update handling above.
for ref htlc in self.pending_inbound_htlcs.iter() {
let (include, state_name) = match htlc.state {
InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
InboundHTLCState::Committed => (true, "Committed"),
InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
add_htlc_output!(htlc, false, None, state_name);
remote_htlc_total_msat += htlc.amount_msat;
log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
// An inbound HTLC we removed via fulfill credits its value back to us and
// contributes its preimage.
&InboundHTLCState::LocalRemoved(ref reason) => {
if generated_by_local {
if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
inbound_htlc_preimages.push(preimage);
value_to_self_msat_offset += htlc.amount_msat as i64;
let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
// Outbound (offered-by-us) HTLCs, with the inclusion rules mirrored.
for ref htlc in self.pending_outbound_htlcs.iter() {
let (include, state_name) = match htlc.state {
OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
OutboundHTLCState::Committed => (true, "Committed"),
OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
// Collect preimages for outbound HTLCs the counterparty fulfilled.
let preimage_opt = match htlc.state {
OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
if let Some(preimage) = preimage_opt {
outbound_htlc_preimages.push(preimage);
add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
local_htlc_total_msat += htlc.amount_msat;
log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
// Successfully-resolved outbound HTLCs debit our balance.
OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
value_to_self_msat_offset -= htlc.amount_msat as i64;
OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
if !generated_by_local {
value_to_self_msat_offset -= htlc.amount_msat as i64;
let value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
assert!(value_to_self_msat >= 0);
// Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
// AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
// "violate" their reserve value by couting those against it. Thus, we have to convert
// everything to i64 before subtracting as otherwise we can overflow.
let value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
assert!(value_to_remote_msat >= 0);
#[cfg(debug_assertions)]
// Make sure that the to_self/to_remote is always either past the appropriate
// channel_reserve *or* it is making progress towards it.
let mut broadcaster_max_commitment_tx_output = if generated_by_local {
self.holder_max_commitment_tx_output.lock().unwrap()
self.counterparty_max_commitment_tx_output.lock().unwrap()
debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
// The funder pays the full commitment fee (and both anchor outputs, if any)
// out of their balance.
let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
let (value_to_self, value_to_remote) = if self.is_outbound() {
(value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
(value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
let mut value_to_a = if local { value_to_self } else { value_to_remote };
let mut value_to_b = if local { value_to_remote } else { value_to_self };
let (funding_pubkey_a, funding_pubkey_b) = if local {
(self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
(self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
// Balances below the broadcaster's dust limit produce no output at all.
if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
let num_nondust_htlcs = included_non_dust_htlcs.len();
let channel_parameters =
if local { self.channel_transaction_parameters.as_holder_broadcastable() }
else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
&mut included_non_dust_htlcs,
let mut htlcs_included = included_non_dust_htlcs;
// The unwrap is safe, because all non-dust HTLCs have been assigned an output index
htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
htlcs_included.append(&mut included_dust_htlcs);
local_balance_msat: value_to_self_msat as u64,
remote_balance_msat: value_to_remote_msat as u64,
inbound_htlc_preimages,
outbound_htlc_preimages,
2683 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
2684 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
2685 /// our counterparty!)
2686 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
2687 /// TODO Some magic rust shit to compile-time check this?
2688 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
2689 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
2690 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
2691 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
2692 let counterparty_pubkeys = self.get_counterparty_pubkeys();
2694 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
2698 /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
2699 /// will sign and send to our counterparty.
2700 /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
2701 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
2702 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
2703 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
2704 let counterparty_pubkeys = self.get_counterparty_pubkeys();
2706 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
2709 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
2710 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
2711 /// Panics if called before accept_channel/InboundV1Channel::new
2712 pub fn get_funding_redeemscript(&self) -> ScriptBuf {
2713 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
2716 fn counterparty_funding_pubkey(&self) -> &PublicKey {
2717 &self.get_counterparty_pubkeys().funding_pubkey
/// Returns the channel's feerate in satoshis per 1000 weight units
/// (presumably `self.feerate_per_kw`; the body is elided in this view — confirm).
pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
2724 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
2725 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
2726 // may, at any point, increase by at least 10 sat/vB (i.e 2530 sat/kWU) or 25%,
2727 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
2728 // more dust balance if the feerate increases when we have several HTLCs pending
2729 // which are near the dust limit.
2730 let mut feerate_per_kw = self.feerate_per_kw;
2731 // If there's a pending update fee, use it to ensure we aren't under-estimating
2732 // potential feerate updates coming soon.
2733 if let Some((feerate, _)) = self.pending_update_fee {
2734 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
2736 if let Some(feerate) = outbound_feerate_update {
2737 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
2739 let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
2740 cmp::max(feerate_per_kw + 2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
2743 /// Get forwarding information for the counterparty.
2744 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
2745 self.counterparty_forwarding_info.clone()
/// Returns a HTLCStats about pending htlcs
///
/// Tallies counts, msat totals, and dust exposure (on both the holder's and the
/// counterparty's commitment transactions) across pending inbound, pending
/// outbound, and holding-cell HTLCs.
fn get_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>, dust_exposure_limiting_feerate: u32) -> HTLCStats {
let uses_0_htlc_fee_anchors = self.get_channel_type().supports_anchors_zero_fee_htlc_tx();
// NOTE(review): `context` below presumably aliases `self` (the binding is not
// visible in this listing) — confirm before relying on it.
let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update);
// Per-HTLC dust thresholds: for non-anchor channels the second-stage tx fee
// (timeout for offered HTLCs, success for accepted ones) is added to the limit.
let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if uses_0_htlc_fee_anchors {
(dust_buffer_feerate as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
dust_buffer_feerate as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000)
let mut on_holder_tx_dust_exposure_msat = 0;
let mut on_counterparty_tx_dust_exposure_msat = 0;
let mut on_counterparty_tx_offered_nondust_htlcs = 0;
let mut on_counterparty_tx_accepted_nondust_htlcs = 0;
let mut pending_inbound_htlcs_value_msat = 0;
// Inbound HTLCs: offered (timeout path) on the counterparty's commitment,
// accepted (success path) on ours — hence the different thresholds.
let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
for ref htlc in context.pending_inbound_htlcs.iter() {
pending_inbound_htlcs_value_msat += htlc.amount_msat;
if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
on_counterparty_tx_offered_nondust_htlcs += 1;
if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
on_holder_tx_dust_exposure_msat += htlc.amount_msat;
let mut pending_outbound_htlcs_value_msat = 0;
let mut outbound_holding_cell_msat = 0;
let mut on_holder_tx_outbound_holding_cell_htlcs_count = 0;
let mut pending_outbound_htlcs = self.pending_outbound_htlcs.len();
// Outbound HTLCs flip the roles: accepted on the counterparty's tx, offered on ours.
let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
for ref htlc in context.pending_outbound_htlcs.iter() {
pending_outbound_htlcs_value_msat += htlc.amount_msat;
if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
on_counterparty_tx_accepted_nondust_htlcs += 1;
if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
on_holder_tx_dust_exposure_msat += htlc.amount_msat;
// Holding-cell adds aren't in pending_outbound_htlcs yet, but they still count
// toward the totals and the dust exposure.
for update in context.holding_cell_htlc_updates.iter() {
if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
pending_outbound_htlcs += 1;
pending_outbound_htlcs_value_msat += amount_msat;
outbound_holding_cell_msat += amount_msat;
if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
on_counterparty_tx_dust_exposure_msat += amount_msat;
on_counterparty_tx_accepted_nondust_htlcs += 1;
if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
on_holder_tx_dust_exposure_msat += amount_msat;
on_holder_tx_outbound_holding_cell_htlcs_count += 1;
// Include any mining "excess" fees in the dust calculation
let excess_feerate_opt = outbound_feerate_update
.or(self.pending_update_fee.map(|(fee, _)| fee))
.unwrap_or(self.feerate_per_kw)
.checked_sub(dust_exposure_limiting_feerate);
if let Some(excess_feerate) = excess_feerate_opt {
let on_counterparty_tx_nondust_htlcs =
on_counterparty_tx_accepted_nondust_htlcs + on_counterparty_tx_offered_nondust_htlcs;
on_counterparty_tx_dust_exposure_msat +=
commit_tx_fee_msat(excess_feerate, on_counterparty_tx_nondust_htlcs, &self.channel_type);
if !self.channel_type.supports_anchors_zero_fee_htlc_tx() {
// Without anchors, each non-dust HTLC also carries a second-stage tx whose
// fee at the excess feerate counts toward dust exposure.
on_counterparty_tx_dust_exposure_msat +=
on_counterparty_tx_accepted_nondust_htlcs as u64 * htlc_success_tx_weight(&self.channel_type)
* excess_feerate as u64 / 1000;
on_counterparty_tx_dust_exposure_msat +=
on_counterparty_tx_offered_nondust_htlcs as u64 * htlc_timeout_tx_weight(&self.channel_type)
* excess_feerate as u64 / 1000;
pending_inbound_htlcs: self.pending_inbound_htlcs.len(),
pending_outbound_htlcs,
pending_inbound_htlcs_value_msat,
pending_outbound_htlcs_value_msat,
on_counterparty_tx_dust_exposure_msat,
on_holder_tx_dust_exposure_msat,
outbound_holding_cell_msat,
on_holder_tx_outbound_holding_cell_htlcs_count,
2855 /// Returns information on all pending inbound HTLCs.
2856 pub fn get_pending_inbound_htlc_details(&self) -> Vec<InboundHTLCDetails> {
// First pass: record, per htlc_id, any pending holding-cell resolution (claim/fail/
// fail-malformed). These take precedence over the HTLC's own state when reporting below.
2857 let mut holding_cell_states = new_hash_map();
2858 for holding_cell_update in self.holding_cell_htlc_updates.iter() {
2859 match holding_cell_update {
2860 HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2861 holding_cell_states.insert(
2863 InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill,
2866 HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2867 holding_cell_states.insert(
2869 InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail,
2872 HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } => {
2873 holding_cell_states.insert(
2875 InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail,
// AddHTLC updates are outbound-only, so they never affect inbound HTLC state.
2879 HTLCUpdateAwaitingACK::AddHTLC { .. } => {},
2882 let mut inbound_details = Vec::new();
// Dust threshold for inbound (success-path) HTLCs on our commitment tx. For anchor
// channels the elided branch presumably uses no fee-based buffer — TODO confirm against
// the full source; the visible branch adds a buffer-feerate-scaled success-tx weight.
2883 let htlc_success_dust_limit = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2886 let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
2887 dust_buffer_feerate * htlc_success_tx_weight(self.get_channel_type()) / 1000
2889 let holder_dust_limit_success_sat = htlc_success_dust_limit + self.holder_dust_limit_satoshis;
// Second pass: report each pending inbound HTLC, preferring the holding-cell state
// recorded above over the state derived from the HTLC itself.
2890 for htlc in self.pending_inbound_htlcs.iter() {
2891 if let Some(state_details) = (&htlc.state).into() {
2892 inbound_details.push(InboundHTLCDetails{
2893 htlc_id: htlc.htlc_id,
2894 amount_msat: htlc.amount_msat,
2895 cltv_expiry: htlc.cltv_expiry,
2896 payment_hash: htlc.payment_hash,
2897 state: Some(holding_cell_states.remove(&htlc.htlc_id).unwrap_or(state_details)),
// Note amount is msat; the dust limit is sat, hence the / 1000.
2898 is_dust: htlc.amount_msat / 1000 < holder_dust_limit_success_sat,
2905 /// Returns information on all pending outbound HTLCs.
2906 pub fn get_pending_outbound_htlc_details(&self) -> Vec<OutboundHTLCDetails> {
2907 let mut outbound_details = Vec::new();
// Dust threshold for outbound (timeout-path) HTLCs on our commitment tx.
// NOTE(review): this branch uses htlc_success_tx_weight even though the variable is the
// *timeout* dust limit — the surrounding lines are elided here, so confirm against the
// full source whether this is intentional or uses the timeout weight there.
2908 let htlc_timeout_dust_limit = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2911 let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
2912 dust_buffer_feerate * htlc_success_tx_weight(self.get_channel_type()) / 1000
2914 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.holder_dust_limit_satoshis;
// Committed (or otherwise tracked) outbound HTLCs, reported with their derived state.
2915 for htlc in self.pending_outbound_htlcs.iter() {
2916 outbound_details.push(OutboundHTLCDetails{
2917 htlc_id: Some(htlc.htlc_id),
2918 amount_msat: htlc.amount_msat,
2919 cltv_expiry: htlc.cltv_expiry,
2920 payment_hash: htlc.payment_hash,
2921 skimmed_fee_msat: htlc.skimmed_fee_msat,
2922 state: Some((&htlc.state).into()),
2923 is_dust: htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat,
// Also include holding-cell adds: HTLCs we intend to send but have not yet announced,
// hence the fixed AwaitingRemoteRevokeToAdd state (and, per the elided struct literal,
// presumably no htlc_id yet — confirm against the full source).
2926 for holding_cell_update in self.holding_cell_htlc_updates.iter() {
2927 if let HTLCUpdateAwaitingACK::AddHTLC {
2933 } = *holding_cell_update {
2934 outbound_details.push(OutboundHTLCDetails{
2936 amount_msat: amount_msat,
2937 cltv_expiry: cltv_expiry,
2938 payment_hash: payment_hash,
2939 skimmed_fee_msat: skimmed_fee_msat,
2940 state: Some(OutboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
2941 is_dust: amount_msat / 1000 < holder_dust_limit_timeout_sat,
2948 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
2949 /// Doesn't bother handling the
2950 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
2951 /// corner case properly.
2952 pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
2953 -> AvailableBalances
2954 where F::Target: FeeEstimator
2956 let context = &self;
2957 // Note that we have to handle overflow due to the case mentioned in the docs in general
2960 let dust_exposure_limiting_feerate = self.get_dust_exposure_limiting_feerate(&fee_estimator);
2961 let htlc_stats = context.get_pending_htlc_stats(None, dust_exposure_limiting_feerate);
// Our raw balance: what we'd have if all inbound fulfills land and all pending
// outbound HTLCs are subtracted (they're claimable by the counterparty).
2963 let mut balance_msat = context.value_to_self_msat;
2964 for ref htlc in context.pending_inbound_htlcs.iter() {
2965 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
2966 balance_msat += htlc.amount_msat;
2969 balance_msat -= htlc_stats.pending_outbound_htlcs_value_msat;
// Outbound capacity additionally respects the counterparty-selected reserve we must keep.
2971 let outbound_capacity_msat = context.value_to_self_msat
2972 .saturating_sub(htlc_stats.pending_outbound_htlcs_value_msat)
2974 context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
2976 let mut available_capacity_msat = outbound_capacity_msat;
// Anchor channels lock up two fixed-value anchor outputs on the commitment tx.
2978 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2979 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
2983 if context.is_outbound() {
2984 // We should mind channel commit tx fee when computing how much of the available capacity
2985 // can be used in the next htlc. Mirrors the logic in send_htlc.
2987 // The fee depends on whether the amount we will be sending is above dust or not,
2988 // and the answer will in turn change the amount itself — making it a circular
2990 // This complicates the computation around dust-values, up to the one-htlc-value.
2991 let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
2992 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2993 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
// Fee if the candidate HTLC lands exactly at the dust boundary (non-dust)...
2996 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
2997 let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
// ...vs. one msat below it (dust, so it adds no HTLC output to the commitment tx).
2998 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
2999 let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
3000 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3001 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
3002 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
3005 // We will first subtract the fee as if we were above-dust. Then, if the resulting
3006 // value ends up being below dust, we have this fee available again. In that case,
3007 // match the value to right-below-dust.
3008 let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
3009 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
3010 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
3011 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
3012 debug_assert!(one_htlc_difference_msat != 0);
3013 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
3014 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
3015 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
3017 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
3020 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
3021 // sending a new HTLC won't reduce their balance below our reserve threshold.
3022 let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
3023 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3024 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
3027 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
3028 let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
3030 let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
3031 let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
3032 .saturating_sub(htlc_stats.pending_inbound_htlcs_value_msat);
3034 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
3035 // If another HTLC's fee would reduce the remote's balance below the reserve limit
3036 // we've selected for them, we can only send dust HTLCs.
3037 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
3041 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
3043 // If we get close to our maximum dust exposure, we end up in a situation where we can send
3044 // between zero and the remaining dust exposure limit remaining OR above the dust limit.
3045 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
3046 // send above the dust limit (as the router can always overpay to meet the dust limit).
3047 let mut remaining_msat_below_dust_exposure_limit = None;
3048 let mut dust_exposure_dust_limit_msat = 0;
3049 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(dust_exposure_limiting_feerate);
// Dust thresholds for this exposure check; non-anchor channels pad both sides with a
// buffer-feerate-scaled HTLC-tx weight, anchors use the raw dust limits.
3051 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3052 (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
3054 let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
3055 (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
3056 context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
// Any feerate above the dust-exposure-limiting feerate counts commitment fees as
// dust exposure too.
3059 let excess_feerate_opt = self.feerate_per_kw.checked_sub(dust_exposure_limiting_feerate);
3060 if let Some(excess_feerate) = excess_feerate_opt {
3061 let htlc_dust_exposure_msat =
3062 per_outbound_htlc_counterparty_commit_tx_fee_msat(excess_feerate, &context.channel_type);
3063 let nondust_htlc_counterparty_tx_dust_exposure =
3064 htlc_stats.on_counterparty_tx_dust_exposure_msat.saturating_add(htlc_dust_exposure_msat);
3065 if nondust_htlc_counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
3066 // If adding an extra HTLC would put us over the dust limit in total fees, we cannot
3067 // send any non-dust HTLCs.
3068 available_capacity_msat = cmp::min(available_capacity_msat, htlc_success_dust_limit * 1000);
3072 if htlc_stats.on_counterparty_tx_dust_exposure_msat.saturating_add(htlc_success_dust_limit * 1000) > max_dust_htlc_exposure_msat.saturating_add(1) {
3073 // Note that we don't use the `counterparty_tx_dust_exposure` (with
3074 // `htlc_dust_exposure_msat`) here as it only applies to non-dust HTLCs.
3075 remaining_msat_below_dust_exposure_limit =
3076 Some(max_dust_htlc_exposure_msat.saturating_sub(htlc_stats.on_counterparty_tx_dust_exposure_msat));
3077 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
3080 if htlc_stats.on_holder_tx_dust_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
3081 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
3082 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
3083 max_dust_htlc_exposure_msat.saturating_sub(htlc_stats.on_holder_tx_dust_exposure_msat)));
3084 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
3087 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
3088 if available_capacity_msat < dust_exposure_dust_limit_msat {
3089 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
3091 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
// Finally cap by the counterparty's max-in-flight value and max-accepted-HTLC count.
3095 available_capacity_msat = cmp::min(available_capacity_msat,
3096 context.counterparty_max_htlc_value_in_flight_msat - htlc_stats.pending_outbound_htlcs_value_msat);
3098 if htlc_stats.pending_outbound_htlcs + 1 > context.counterparty_max_accepted_htlcs as usize {
3099 available_capacity_msat = 0;
3103 inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
3104 - context.value_to_self_msat as i64
3105 - htlc_stats.pending_inbound_htlcs_value_msat as i64
3106 - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
3108 outbound_capacity_msat,
3109 next_outbound_htlc_limit_msat: available_capacity_msat,
3110 next_outbound_htlc_minimum_msat,
// Returns (our selected reserve, the counterparty's selected reserve, if known) in sats.
// The counterparty's value is `None` until it has been negotiated/received.
3115 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
3116 let context = &self;
3117 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
3120 /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
3121 /// number of pending HTLCs that are on track to be in our next commitment tx.
3123 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
3124 /// `fee_spike_buffer_htlc` is `Some`.
3126 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
3127 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
3129 /// Dust HTLCs are excluded.
3130 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
3131 let context = &self;
// Only the channel funder pays commitment fees, so this only makes sense outbound.
3132 assert!(context.is_outbound());
// Non-anchor channels fold the second-stage HTLC-tx fee into the effective dust limit;
// the anchor branch is elided here but presumably uses (0, 0) — confirm in full source.
3134 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3137 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
3138 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
3140 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
3141 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
// Count the candidate HTLC (if non-dust) plus the optional fee-spike buffer HTLC.
3143 let mut addl_htlcs = 0;
3144 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
3146 HTLCInitiator::LocalOffered => {
// On our tx, HTLCs we offer are spent via the timeout path.
3147 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
3151 HTLCInitiator::RemoteOffered => {
// HTLCs offered to us are spent via the success path.
3152 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
// Count existing non-dust HTLCs that will appear on our next commitment tx.
3158 let mut included_htlcs = 0;
3159 for ref htlc in context.pending_inbound_htlcs.iter() {
3160 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
3163 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
3164 // transaction including this HTLC if it times out before they RAA.
3165 included_htlcs += 1;
3168 for ref htlc in context.pending_outbound_htlcs.iter() {
3169 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
3173 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
3174 OutboundHTLCState::Committed => included_htlcs += 1,
3175 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
3176 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
3177 // transaction won't be generated until they send us their next RAA, which will mean
3178 // dropping any HTLCs in this state.
// Holding-cell adds will also be on our next commitment tx once freed.
3183 for htlc in context.holding_cell_htlc_updates.iter() {
3185 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
3186 if amount_msat / 1000 < real_dust_limit_timeout_sat {
3191 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
3192 // ack we're guaranteed to never include them in commitment txs anymore.
3196 let num_htlcs = included_htlcs + addl_htlcs;
3197 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
// Test/fuzz-only: cache the computed fee and the HTLC counts it was based on so later
// commitment construction can cross-check the prediction.
3198 #[cfg(any(test, fuzzing))]
3201 if fee_spike_buffer_htlc.is_some() {
// The cached fee excludes the spike-buffer HTLC (it is a safety margin, not real).
3202 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
3204 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
3205 + context.holding_cell_htlc_updates.len();
3206 let commitment_tx_info = CommitmentTxInfoCached {
3208 total_pending_htlcs,
3209 next_holder_htlc_id: match htlc.origin {
3210 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
3211 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
3213 next_counterparty_htlc_id: match htlc.origin {
3214 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
3215 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
3217 feerate: context.feerate_per_kw,
3219 *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
3224 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
3225 /// pending HTLCs that are on track to be in their next commitment tx
3227 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
3228 /// `fee_spike_buffer_htlc` is `Some`.
3230 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
3231 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
3233 /// Dust HTLCs are excluded.
3234 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
3235 let context = &self;
// Mirror of next_local_commit_tx_fee_msat: the counterparty funds this channel, so the
// fee being estimated is for *their* commitment tx.
3236 assert!(!context.is_outbound());
// Non-anchor channels fold the second-stage HTLC-tx fee into the effective dust limit;
// the anchor branch is elided here but presumably uses (0, 0) — confirm in full source.
3238 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3241 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
3242 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
// Note: dust limits here are the *counterparty's*, since this is their commitment tx.
3244 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
3245 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
3247 let mut addl_htlcs = 0;
3248 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
3250 HTLCInitiator::LocalOffered => {
// On their tx, HTLCs we offer are claimed by them via the success path.
3251 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
3255 HTLCInitiator::RemoteOffered => {
3256 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
3262 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
3263 // non-dust inbound HTLCs are included (as all states imply it will be included) and only
3264 // committed outbound HTLCs, see below.
3265 let mut included_htlcs = 0;
3266 for ref htlc in context.pending_inbound_htlcs.iter() {
3267 if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
3270 included_htlcs += 1;
3273 for ref htlc in context.pending_outbound_htlcs.iter() {
3274 if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
3277 // We only include outbound HTLCs if it will not be included in their next commitment_signed,
3278 // i.e. if they've responded to us with an RAA after announcement.
3280 OutboundHTLCState::Committed => included_htlcs += 1,
3281 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
3282 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
3287 let num_htlcs = included_htlcs + addl_htlcs;
3288 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
// Test/fuzz-only: cache the prediction for later cross-checking, excluding the
// spike-buffer HTLC from the cached fee (it is a safety margin, not a real HTLC).
3289 #[cfg(any(test, fuzzing))]
3292 if fee_spike_buffer_htlc.is_some() {
3293 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
3295 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
3296 let commitment_tx_info = CommitmentTxInfoCached {
3298 total_pending_htlcs,
3299 next_holder_htlc_id: match htlc.origin {
3300 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
3301 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
3303 next_counterparty_htlc_id: match htlc.origin {
3304 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
3305 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
3307 feerate: context.feerate_per_kw,
3309 *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
// Runs `f` only while the funding transaction could not yet have been broadcast:
// either funding is still being negotiated, or we are AwaitingChannelReady with a flag
// (batch-funding wait, or monitor update in progress) that holds back the broadcast.
// Returns `None` in every other channel state (per the elided fall-through arms —
// confirm against the full source).
3314 fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O> where F: Fn() -> Option<O> {
3315 match self.channel_state {
3316 ChannelState::FundingNegotiated => f(),
3317 ChannelState::AwaitingChannelReady(flags) =>
3318 if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) ||
3319 flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into())
3329 /// Returns the transaction if there is a pending funding transaction that is yet to be
// Delegates the state check to `if_unbroadcasted_funding`; simply clones the stored
// funding transaction (which may itself be absent) when in an unbroadcast state.
3331 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
3332 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
3335 /// Returns the transaction ID if there is a pending funding transaction that is yet to be
// Like `unbroadcasted_funding`, but reads only the txid from the negotiated funding
// outpoint, so it works even when we never held the full funding transaction.
3337 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
3338 self.if_unbroadcasted_funding(||
3339 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
3343 /// Returns whether the channel is funded in a batch.
// `is_batch_funding` is an Option used as a flag: `Some(..)` means batch funding.
3344 pub fn is_batch_funding(&self) -> bool {
3345 self.is_batch_funding.is_some()
3348 /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
// Intersection of the two predicates: an unbroadcast funding txid AND batch funding.
3350 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
3351 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
3354 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
3355 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
3356 /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
3357 /// Also returns the list of payment_hashes for channels which we can safely fail backwards
3358 /// immediately (others we will have to allow to time out).
3359 pub fn force_shutdown(&mut self, should_broadcast: bool, closure_reason: ClosureReason) -> ShutdownResult {
3360 // Note that we MUST only generate a monitor update that indicates force-closure - we're
3361 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
3362 // being fully configured in some cases. Thus, its likely any monitor events we generate will
3363 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
// Calling force_shutdown twice is a caller bug.
3364 assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete));
3366 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
3367 // return them to fail the payment.
3368 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
3369 let counterparty_node_id = self.get_counterparty_node_id();
// drain(..) empties the holding cell; only Add updates become failable-backwards HTLCs.
3370 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
3372 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
3373 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
3378 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
3379 // If we haven't yet exchanged funding signatures (ie channel_state < AwaitingChannelReady),
3380 // returning a channel monitor update here would imply a channel monitor update before
3381 // we even registered the channel monitor to begin with, which is invalid.
3382 // Thus, if we aren't actually at a point where we could conceivably broadcast the
3383 // funding transaction, don't return a funding txo (which prevents providing the
3384 // monitor update to the user, even if we return one).
3385 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
3386 if !self.channel_state.is_pre_funded_state() {
// Sentinel update id marking the final, channel-closing monitor update.
3387 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
3388 Some((self.get_counterparty_node_id(), funding_txo, self.channel_id(), ChannelMonitorUpdate {
3389 update_id: self.latest_monitor_update_id,
3390 counterparty_node_id: Some(self.counterparty_node_id),
3391 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
3392 channel_id: Some(self.channel_id()),
// Capture unbroadcast-funding info before flipping state: these getters consult
// channel_state, which we are about to overwrite with ShutdownComplete.
3396 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
3397 let unbroadcasted_funding_tx = self.unbroadcasted_funding();
3399 self.channel_state = ChannelState::ShutdownComplete;
3400 self.update_time_counter += 1;
3404 dropped_outbound_htlcs,
3405 unbroadcasted_batch_funding_txid,
3406 channel_id: self.channel_id,
3407 user_channel_id: self.user_id,
3408 channel_capacity_satoshis: self.channel_value_satoshis,
3409 counterparty_node_id: self.counterparty_node_id,
3410 unbroadcasted_funding_tx,
3411 channel_funding_txo: self.get_funding_txo(),
3415 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
// Builds the counterparty's initial commitment transaction and, if the signer produces a
// signature synchronously, the `funding_signed` message carrying our signature over it.
// Returns `(commitment_tx, None)` when the signer is async and the signature is pending.
3416 fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
3417 let counterparty_keys = self.build_remote_transaction_keys();
// +1: sign the counterparty's *initial* commitment number, one before the current.
3418 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
3420 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
3421 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
3422 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
3423 &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
3425 match &self.holder_signer {
3426 // TODO (arik): move match into calling method for Taproot
3427 ChannelSignerType::Ecdsa(ecdsa) => {
// `.map(..)` leaves funding_signed as None if the (possibly async) signer declined.
3428 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
3429 .map(|(signature, _)| msgs::FundingSigned {
3430 channel_id: self.channel_id(),
3433 partial_signature_with_nonce: None,
3437 if funding_signed.is_none() {
// Without async signing compiled in, a missing signature is unrecoverable.
3438 #[cfg(not(async_signing))] {
3439 panic!("Failed to get signature for funding_signed");
3441 #[cfg(async_signing)] {
3442 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
3443 self.signer_pending_funding = true;
3445 } else if self.signer_pending_funding {
3446 log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
3447 self.signer_pending_funding = false;
3450 // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
3451 (counterparty_initial_commitment_tx, funding_signed)
3453 // TODO (taproot|arik)
3459 /// If we receive an error message when attempting to open a channel, it may only be a rejection
3460 /// of the channel type we tried, not of our ability to open any channel at all. We can see if a
3461 /// downgrade of channel features would be possible so that we can still open the channel.
3462 pub(crate) fn maybe_downgrade_channel_features<F: Deref>(
3463 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>
3466 F::Target: FeeEstimator
// Downgrading only makes sense on an outbound channel for which we have sent our
// open_channel (OUR_INIT_SENT) but negotiation has not progressed further.
3468 if !self.is_outbound() ||
3470 self.channel_state, ChannelState::NegotiatingFunding(flags)
3471 if flags == NegotiatingFundingFlags::OUR_INIT_SENT
3476 if self.channel_type == ChannelTypeFeatures::only_static_remote_key() {
3477 // We've exhausted our options
3480 // We support opening a few different types of channels. Try removing our additional
3481 // features one by one until we've either arrived at our default or the counterparty has
3484 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
3485 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
3486 // checks whether the counterparty supports every feature, this would only happen if the
3487 // counterparty is advertising the feature, but rejecting channels proposing the feature for
3489 if self.channel_type.supports_anchors_zero_fee_htlc_tx() {
3490 self.channel_type.clear_anchors_zero_fee_htlc_tx();
// Dropping anchors changes which fee target applies; re-pull the feerate.
3491 self.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
3492 assert!(!self.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
3493 } else if self.channel_type.supports_scid_privacy() {
3494 self.channel_type.clear_scid_privacy();
// Final fallback: the minimal channel type every peer must support.
3496 self.channel_type = ChannelTypeFeatures::only_static_remote_key();
// Keep the transaction parameters' feature set in sync with the downgraded type.
3498 self.channel_transaction_parameters.channel_type_features = self.channel_type.clone();
3503 // Internal utility functions for channels
3505 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
3506 /// `channel_value_satoshis` in msat, set through
3507 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
3509 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
3511 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
3512 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
// Clamp the configured percentage to [1, 100]; the clamp values themselves are on the
// elided branch lines — confirm against the full source.
3513 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
3515 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
3518 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
// sats * 10 * percent == sats * 1000 * (percent / 100), i.e. percent of value in msat.
3520 channel_value_satoshis * 10 * configured_percent
3523 /// Returns a minimum channel reserve value the remote needs to maintain,
3524 /// required by us according to the configured or default
3525 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
3527 /// Guaranteed to return a value no larger than channel_value_satoshis
3529 /// This is used both for outbound and inbound channels and has lower bound
3530 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
3531 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
// saturating_mul guards against overflow of value * millionths before the / 1_000_000.
3532 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
// clamp(calculated, MIN_THEIR_CHAN_RESERVE_SATOSHIS, channel_value_satoshis).
3533 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
3536 /// This is for legacy reasons, present for forward-compatibility.
3537 /// LDK versions older than 0.0.104 don't know how read/handle values other than default
3538 /// from storage. Hence, we use this function to not persist default values of
3539 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
3540 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
// Legacy default: 1% of channel value, clamped to [1000 sats, channel_value_satoshis].
3541 let (q, _) = channel_value_satoshis.overflowing_div(100);
3542 cmp::min(channel_value_satoshis, cmp::max(q, 1000))
3545 /// Returns a minimum channel reserve value each party needs to maintain, fixed in the spec to a
3546 /// default of 1% of the total channel value.
3548 /// Guaranteed to return a value no larger than channel_value_satoshis
3550 /// This is used both for outbound and inbound channels and has lower bound
3551 /// of `dust_limit_satoshis`.
3552 #[cfg(any(dual_funding, splicing))]
3553 fn get_v2_channel_reserve_satoshis(channel_value_satoshis: u64, dust_limit_satoshis: u64) -> u64 {
3554 // Fixed at 1% of channel value by spec.
3555 let (q, _) = channel_value_satoshis.overflowing_div(100);
// clamp(1% of value, dust_limit_satoshis, channel_value_satoshis).
3556 cmp::min(channel_value_satoshis, cmp::max(q, dust_limit_satoshis))
3559 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
3560 // Note that num_htlcs should not include dust HTLCs.
3562 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
// fee = feerate (sat per 1000 weight) * total tx weight / 1000, where the weight is the
// channel-type-dependent base commitment weight plus a fixed weight per non-dust HTLC output.
3563 feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
3566 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
3567 // Note that num_htlcs should not include dust HTLCs.
3568 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
3569 // Note that we need to divide before multiplying to round properly,
3570 // since the lowest denomination of bitcoin on-chain is the satoshi.
// The trailing `/ 1000 * 1000` intentionally truncates the msat value to whole-satoshi
// precision before re-expressing it in msats.
3571 (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
// Fee cost in msats, on the counterparty's commitment transaction, attributable to one
// outbound HTLC (its output weight plus, for some channel types, its HTLC-transaction fee).
3574 pub(crate) fn per_outbound_htlc_counterparty_commit_tx_fee_msat(feerate_per_kw: u32, channel_type_features: &ChannelTypeFeatures) -> u64 {
3575 // Note that we need to divide before multiplying to round properly,
3576 // since the lowest denomination of bitcoin on-chain is the satoshi.
// `/ 1000 * 1000` truncates to whole-satoshi precision, expressed in msats.
3577 let commitment_tx_fee = COMMITMENT_TX_WEIGHT_PER_HTLC * feerate_per_kw as u64 / 1000 * 1000;
3578 if channel_type_features.supports_anchors_zero_fee_htlc_tx() {
// For anchor channels the HTLC-success transaction fee is also counted here.
3579 commitment_tx_fee + htlc_success_tx_weight(channel_type_features) * feerate_per_kw as u64 / 1000
3585 /// Context for dual-funded channels.
3586 #[cfg(any(dual_funding, splicing))]
3587 pub(super) struct DualFundingChannelContext {
3588 /// The amount in satoshis we will be contributing to the channel.
3589 pub our_funding_satoshis: u64,
3590 /// The amount in satoshis our counterparty will be contributing to the channel.
3591 pub their_funding_satoshis: u64,
3592 /// The funding transaction locktime suggested by the initiator. If set by us, it is always set
3593 /// to the current block height to align incentives against fee-sniping.
3594 pub funding_tx_locktime: u32,
3595 /// The feerate set by the initiator to be used for the funding transaction.
// Units: satoshis per 1000 weight units, matching the field name.
3596 pub funding_feerate_sat_per_1000_weight: u32,
3599 // Holder designates channel data owned for the benefit of the user client.
3600 // Counterparty designates channel data owned by the other channel participant entity.
3601 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
// All funded-channel state shared with the unfunded channel phases.
3602 pub context: ChannelContext<SP>,
3603 #[cfg(any(dual_funding, splicing))]
// Extra context used when the channel was negotiated via dual funding;
// presumably `None` for single-funded channels — TODO confirm at the construction sites.
3604 pub dual_funding_channel_context: Option<DualFundingChannelContext>,
// Test/fuzzing-only cache of per-commitment-transaction bookkeeping, keyed by the HTLC
// counters below; used to cross-check later computations — NOTE(review): exact use is at
// call sites outside this view, confirm there.
3607 #[cfg(any(test, fuzzing))]
3608 struct CommitmentTxInfoCached {
// Number of HTLCs pending on the cached commitment transaction.
3610 total_pending_htlcs: usize,
// HTLC id counters at the time the cache entry was created.
3611 next_holder_htlc_id: u64,
3612 next_counterparty_htlc_id: u64,
3616 /// Contents of a wire message that fails an HTLC backwards. Useful for [`Channel::fail_htlc`] to
3617 /// fail with either [`msgs::UpdateFailMalformedHTLC`] or [`msgs::UpdateFailHTLC`] as needed.
3618 trait FailHTLCContents {
// The concrete wire message type produced by `to_message`.
3619 type Message: FailHTLCMessageName;
// Builds the outbound wire message failing the given HTLC on the given channel.
3620 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message;
// Converts this failure into the `LocalRemoved` inbound-HTLC state it implies.
3621 fn to_inbound_htlc_state(self) -> InboundHTLCState;
// Converts this failure into a holding-cell entry for later processing.
3622 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK;
// An onion error packet fails back as a standard `update_fail_htlc`.
3624 impl FailHTLCContents for msgs::OnionErrorPacket {
3625 type Message = msgs::UpdateFailHTLC;
3626 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
3627 msgs::UpdateFailHTLC { htlc_id, channel_id, reason: self }
3629 fn to_inbound_htlc_state(self) -> InboundHTLCState {
3630 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(self))
3632 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
3633 HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet: self }
// A `(sha256_of_onion, failure_code)` pair fails back as `update_fail_malformed_htlc`.
3636 impl FailHTLCContents for ([u8; 32], u16) {
3637 type Message = msgs::UpdateFailMalformedHTLC;
3638 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
3639 msgs::UpdateFailMalformedHTLC {
// self.0 is the SHA-256 of the onion we received; self.1 the BOLT 4 failure code.
3642 sha256_of_onion: self.0,
3643 failure_code: self.1
3646 fn to_inbound_htlc_state(self) -> InboundHTLCState {
3647 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(self))
3649 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
3650 HTLCUpdateAwaitingACK::FailMalformedHTLC {
3652 sha256_of_onion: self.0,
3653 failure_code: self.1
// Provides the wire-protocol message name for an HTLC-failure message type, for logging.
3658 trait FailHTLCMessageName {
3659 fn name() -> &'static str;
3661 impl FailHTLCMessageName for msgs::UpdateFailHTLC {
3662 fn name() -> &'static str {
3666 impl FailHTLCMessageName for msgs::UpdateFailMalformedHTLC {
3667 fn name() -> &'static str {
3668 "update_fail_malformed_htlc"
3672 impl<SP: Deref> Channel<SP> where
3673 SP::Target: SignerProvider,
3674 <SP::Target as SignerProvider>::EcdsaSigner: EcdsaChannelSigner
/// Validates a feerate proposed by our counterparty against our own minimum-acceptable
/// estimate for this channel type, returning a `ChannelError::Close` if it is too low.
/// `cur_feerate_per_kw`, when provided, lets us accept a below-minimum feerate as long as
/// it is an improvement over the feerate we already have.
3676 fn check_remote_fee<F: Deref, L: Deref>(
3677 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
3678 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
3679 ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
// Anchor channels tolerate a lower remote feerate since fees can be bumped via anchors.
3681 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
3682 ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
3684 ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
3686 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
3687 if feerate_per_kw < lower_limit {
3688 if let Some(cur_feerate) = cur_feerate_per_kw {
// Below our limit, but still higher than what we have — accept it rather than
// risk being unable to close the channel at the current feerate.
3689 if feerate_per_kw > cur_feerate {
3691 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
3692 cur_feerate, feerate_per_kw);
3696 return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
/// Returns the scriptpubkey our funds should be sent to on cooperative close.
///
/// Panics (via `unwrap`) if no shutdown scriptpubkey has been set yet — see the
/// preconditions below.
3702 fn get_closing_scriptpubkey(&self) -> ScriptBuf {
3703 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
3704 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
3705 // outside of those situations will fail.
3706 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
/// Estimates the weight of the cooperative closing transaction, given the holder ("a") and
/// counterparty ("b") output scriptpubkeys; an output is omitted from the estimate when its
/// scriptpubkey is `None`. Assumes 71-byte signatures (including sighash flag).
3710 fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
3715 1 + // script length (0)
// Non-witness bytes count 4 weight units each per the segwit weight formula.
3719 )*4 + // * 4 for non-witness parts
3720 2 + // witness marker and flag
3721 1 + // witness element count
3722 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
3723 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
3724 2*(1 + 71); // two signatures + sighash type flags
3725 if let Some(spk) = a_scriptpubkey {
3726 ret += ((8+1) + // output values and script length
3727 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
3729 if let Some(spk) = b_scriptpubkey {
3730 ret += ((8+1) + // output values and script length
3731 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
/// Builds the cooperative closing transaction for the proposed total fee, returning it along
/// with the fee actually paid (which may exceed the proposal if the funder's balance cannot
/// cover it). `skip_remote_output` forces the counterparty's output to be omitted.
///
/// Panics if any HTLCs or a fee update are still pending — the channel must be fully
/// quiesced before a closing transaction can be built.
3737 fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
3738 assert!(self.context.pending_inbound_htlcs.is_empty());
3739 assert!(self.context.pending_outbound_htlcs.is_empty());
3740 assert!(self.context.pending_update_fee.is_none());
3742 let mut total_fee_satoshis = proposed_total_fee_satoshis;
// The fee is always paid by the channel funder (the outbound side).
3743 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
3744 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
// If the funder's balance went negative, the shortfall is absorbed into the fee and
// the funder's output is effectively zeroed below via the dust check.
3746 if value_to_holder < 0 {
3747 assert!(self.context.is_outbound());
3748 total_fee_satoshis += (-value_to_holder) as u64;
3749 } else if value_to_counterparty < 0 {
3750 assert!(!self.context.is_outbound());
3751 total_fee_satoshis += (-value_to_counterparty) as u64;
// Prune outputs at or below our dust limit (or when explicitly skipped).
3754 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
3755 value_to_counterparty = 0;
3758 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
3759 value_to_holder = 0;
3762 assert!(self.context.shutdown_scriptpubkey.is_some());
3763 let holder_shutdown_script = self.get_closing_scriptpubkey();
3764 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
3765 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
3767 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
3768 (closing_transaction, total_fee_satoshis)
/// Returns the channel's funding outpoint. Panics if funding has not been established yet
/// (i.e. the outpoint is still unset in the transaction parameters).
3771 fn funding_outpoint(&self) -> OutPoint {
3772 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
3775 /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
3778 /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
3779 /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
3781 /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
3783 pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
3784 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
3785 where L::Target: Logger {
3786 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
3787 // (see equivalent if condition there).
3788 assert!(!self.context.channel_state.can_generate_new_commitment());
// Save and restore the monitor update id so the update generated by
// `get_update_fulfill_htlc` is effectively discarded, per this method's contract.
3789 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
3790 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
3791 self.context.latest_monitor_update_id = mon_update_id;
3792 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
3793 assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
/// Claims an inbound HTLC with the given preimage, producing a `ChannelMonitorUpdate`
/// carrying the preimage and, when the channel can generate a new commitment, an
/// `update_fulfill_htlc` message to send. Otherwise the claim is queued in the holding cell
/// and `msg` is `None`. Duplicate claims return `UpdateFulfillFetch::DuplicateClaim`.
3797 fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
3798 // Either ChannelReady got set (which means it won't be unset) or there is no way any
3799 // caller thought we could have something claimed (cause we wouldn't have accepted in an
3800 // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
3802 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3803 panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
3806 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
3807 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
3808 // these, but for now we just have to treat them as normal.
// usize::MAX doubles as a "not found" sentinel below.
3810 let mut pending_idx = core::usize::MAX;
3811 let mut htlc_value_msat = 0;
3812 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
3813 if htlc.htlc_id == htlc_id_arg {
// Sanity-check the preimage actually matches the HTLC's payment hash.
3814 debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
3815 log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
3816 htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
3818 InboundHTLCState::Committed => {},
3819 InboundHTLCState::LocalRemoved(ref reason) => {
3820 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3822 log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
3823 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
3825 return UpdateFulfillFetch::DuplicateClaim {};
3828 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
3829 // Don't return in release mode here so that we can update channel_monitor
3833 htlc_value_msat = htlc.amount_msat;
3837 if pending_idx == core::usize::MAX {
3838 #[cfg(any(test, fuzzing))]
3839 // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
3840 // this is simply a duplicate claim, not previously failed and we lost funds.
3841 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
3842 return UpdateFulfillFetch::DuplicateClaim {};
3845 // Now update local state:
3847 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
3848 // can claim it even if the channel hits the chain before we see their next commitment.
3849 self.context.latest_monitor_update_id += 1;
3850 let monitor_update = ChannelMonitorUpdate {
3851 update_id: self.context.latest_monitor_update_id,
3852 counterparty_node_id: Some(self.context.counterparty_node_id),
3853 updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
3854 payment_preimage: payment_preimage_arg.clone(),
3856 channel_id: Some(self.context.channel_id()),
3859 if !self.context.channel_state.can_generate_new_commitment() {
3860 // Note that this condition is the same as the assertion in
3861 // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
3862 // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
3863 // do not get into this branch.
3864 for pending_update in self.context.holding_cell_htlc_updates.iter() {
3865 match pending_update {
3866 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
3867 if htlc_id_arg == htlc_id {
3868 // Make sure we don't leave latest_monitor_update_id incremented here:
3869 self.context.latest_monitor_update_id -= 1;
3870 #[cfg(any(test, fuzzing))]
3871 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
3872 return UpdateFulfillFetch::DuplicateClaim {};
3875 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
3876 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
3878 if htlc_id_arg == htlc_id {
3879 log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
3880 // TODO: We may actually be able to switch to a fulfill here, though its
3881 // rare enough it may not be worth the complexity burden.
3882 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
// Still return the monitor update so the preimage reaches the monitor.
3883 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
3889 log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32());
3890 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
3891 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
3893 #[cfg(any(test, fuzzing))]
3894 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
3895 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
3897 #[cfg(any(test, fuzzing))]
3898 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
3901 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
3902 if let InboundHTLCState::Committed = htlc.state {
3904 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
3905 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
3907 log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
3908 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
3911 UpdateFulfillFetch::NewClaim {
3914 msg: Some(msgs::UpdateFulfillHTLC {
3915 channel_id: self.context.channel_id(),
3916 htlc_id: htlc_id_arg,
3917 payment_preimage: payment_preimage_arg,
/// Claims an inbound HTLC and, when a fulfill message was generated, immediately builds the
/// follow-up commitment update, merging both into a single `ChannelMonitorUpdate`.
3922 pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
3923 let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
3924 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
3925 UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
3926 // Even if we aren't supposed to let new monitor updates with commitment state
3927 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
3928 // matter what. Sadly, to push a new monitor update which flies before others
3929 // already queued, we have to insert it into the pending queue and update the
3930 // update_ids of all the following monitors.
3931 if release_cs_monitor && msg.is_some() {
3932 let mut additional_update = self.build_commitment_no_status_check(logger);
3933 // build_commitment_no_status_check may bump latest_monitor_id but we want them
3934 // to be strictly increasing by one, so decrement it here.
3935 self.context.latest_monitor_update_id = monitor_update.update_id;
3936 monitor_update.updates.append(&mut additional_update.updates);
// Slot this update in ahead of any blocked updates, shifting their ids up.
3938 let new_mon_id = self.context.blocked_monitor_updates.get(0)
3939 .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
3940 monitor_update.update_id = new_mon_id;
3941 for held_update in self.context.blocked_monitor_updates.iter_mut() {
3942 held_update.update.update_id += 1;
3945 debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
3946 let update = self.build_commitment_no_status_check(logger);
3947 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
3953 self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
3954 UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
3956 UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
3960 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
3961 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
3962 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
3963 /// before we fail backwards.
3965 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
3966 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
3967 /// [`ChannelError::Ignore`].
3968 pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
3969 -> Result<(), ChannelError> where L::Target: Logger {
// force_holding_cell = true, so `fail_htlc` queues the failure rather than emitting a message.
3970 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
3971 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
3974 /// Used for failing back with [`msgs::UpdateFailMalformedHTLC`]. For now, this is used when we
3975 /// want to fail blinded HTLCs where we are not the intro node.
3977 /// See [`Self::queue_fail_htlc`] for more info.
3978 pub fn queue_fail_malformed_htlc<L: Deref>(
3979 &mut self, htlc_id_arg: u64, failure_code: u16, sha256_of_onion: [u8; 32], logger: &L
3980 ) -> Result<(), ChannelError> where L::Target: Logger {
// force_holding_cell = true, so `fail_htlc` queues the failure rather than emitting a message.
3981 self.fail_htlc(htlc_id_arg, (sha256_of_onion, failure_code), true, logger)
3982 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
3985 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
3986 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
3987 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
3988 /// before we fail backwards.
3990 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
3991 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
3992 /// [`ChannelError::Ignore`].
// Generic over `E: FailHTLCContents` so the same flow handles both `update_fail_htlc`
// (OnionErrorPacket) and `update_fail_malformed_htlc` ((sha256, failure_code)) variants.
3993 fn fail_htlc<L: Deref, E: FailHTLCContents + Clone>(
3994 &mut self, htlc_id_arg: u64, err_contents: E, mut force_holding_cell: bool,
3996 ) -> Result<Option<E::Message>, ChannelError> where L::Target: Logger {
3997 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3998 panic!("Was asked to fail an HTLC when channel was not in an operational state");
4001 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
4002 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
4003 // these, but for now we just have to treat them as normal.
// usize::MAX doubles as a "not found" sentinel below.
4005 let mut pending_idx = core::usize::MAX;
4006 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
4007 if htlc.htlc_id == htlc_id_arg {
4009 InboundHTLCState::Committed => {},
4010 InboundHTLCState::LocalRemoved(ref reason) => {
4011 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
4013 debug_assert!(false, "Tried to fail an HTLC that was already failed");
4018 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
4019 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
4025 if pending_idx == core::usize::MAX {
4026 #[cfg(any(test, fuzzing))]
4027 // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
4028 // is simply a duplicate fail, not previously failed and we failed-back too early.
4029 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
4033 if !self.context.channel_state.can_generate_new_commitment() {
4034 debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
4035 force_holding_cell = true;
4038 // Now update local state:
4039 if force_holding_cell {
// Scan the holding cell for a conflicting resolution of the same HTLC first.
4040 for pending_update in self.context.holding_cell_htlc_updates.iter() {
4041 match pending_update {
4042 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
4043 if htlc_id_arg == htlc_id {
4044 #[cfg(any(test, fuzzing))]
4045 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
4049 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
4050 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
4052 if htlc_id_arg == htlc_id {
4053 debug_assert!(false, "Tried to fail an HTLC that was already failed");
4054 return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
4060 log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
4061 self.context.holding_cell_htlc_updates.push(err_contents.to_htlc_update_awaiting_ack(htlc_id_arg));
4065 log_trace!(logger, "Failing HTLC ID {} back with {} message in channel {}.", htlc_id_arg,
4066 E::Message::name(), &self.context.channel_id());
4068 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
4069 htlc.state = err_contents.clone().to_inbound_htlc_state();
4072 Ok(Some(err_contents.to_message(htlc_id_arg, self.context.channel_id())))
4075 // Message handlers:
4076 /// Updates the state of the channel to indicate that all channels in the batch have received
4077 /// funding_signed and persisted their monitors.
4078 /// The funding transaction is consequently allowed to be broadcast, and the channel can be
4079 /// treated as a non-batch channel going forward.
4080 pub fn set_batch_ready(&mut self) {
4081 self.context.is_batch_funding = None;
4082 self.context.channel_state.clear_waiting_for_batch();
4085 /// Unsets the existing funding information.
4087 /// This must only be used if the channel has not yet completed funding and has not been used.
4089 /// Further, the channel must be immediately shut down after this with a call to
4090 /// [`ChannelContext::force_shutdown`].
4091 pub fn unset_funding_info(&mut self, temporary_channel_id: ChannelId) {
4092 debug_assert!(matches!(
4093 self.context.channel_state, ChannelState::AwaitingChannelReady(_)
// Revert to the pre-funding identity: no funding outpoint, temporary channel id.
4095 self.context.channel_transaction_parameters.funding_outpoint = None;
4096 self.context.channel_id = temporary_channel_id;
4099 /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
4100 /// and the channel is now usable (and public), this may generate an announcement_signatures to
4102 pub fn channel_ready<NS: Deref, L: Deref>(
4103 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
4104 user_config: &UserConfig, best_block: &BestBlock, logger: &L
4105 ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
4107 NS::Target: NodeSigner,
// Stash the message and retry after reestablish if the peer (likely lnd) sent it early.
4110 if self.context.channel_state.is_peer_disconnected() {
4111 self.context.workaround_lnd_bug_4006 = Some(msg.clone());
4112 return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
4115 if let Some(scid_alias) = msg.short_channel_id_alias {
4116 if Some(scid_alias) != self.context.short_channel_id {
4117 // The scid alias provided can be used to route payments *from* our counterparty,
4118 // i.e. can be used for inbound payments and provided in invoices, but is not used
4119 // when routing outbound payments.
4120 self.context.latest_inbound_scid_alias = Some(scid_alias);
4124 // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
4125 // batch, but we can receive channel_ready messages.
4126 let mut check_reconnection = false;
4127 match &self.context.channel_state {
4128 ChannelState::AwaitingChannelReady(flags) => {
4129 let flags = flags.clone().clear(FundedStateFlags::ALL.into());
4130 debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
4131 if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
4132 // If we reconnected before sending our `channel_ready` they may still resend theirs.
4133 check_reconnection = true;
4134 } else if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
// First channel_ready from them: record it and keep waiting for our side.
4135 self.context.channel_state.set_their_channel_ready();
4136 } else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
// Both sides are now ready: transition the channel to ChannelReady.
4137 self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
4138 self.context.update_time_counter += 1;
4140 // We're in `WAITING_FOR_BATCH`, so we should wait until we're ready.
4141 debug_assert!(flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
4144 // If we reconnected before sending our `channel_ready` they may still resend theirs.
4145 ChannelState::ChannelReady(_) => check_reconnection = true,
4146 _ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
4148 if check_reconnection {
4149 // They probably disconnected/reconnected and re-sent the channel_ready, which is
4150 // required, or they're sending a fresh SCID alias.
4151 let expected_point =
4152 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
4153 // If they haven't ever sent an updated point, the point they send should match
4155 self.context.counterparty_cur_commitment_point
4156 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
4157 // If we've advanced the commitment number once, the second commitment point is
4158 // at `counterparty_prev_commitment_point`, which is not yet revoked.
4159 debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
4160 self.context.counterparty_prev_commitment_point
4162 // If they have sent updated points, channel_ready is always supposed to match
4163 // their "first" point, which we re-derive here.
4164 Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
4165 &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
4166 ).expect("We already advanced, so previous secret keys should have been validated already")))
4168 if expected_point != Some(msg.next_per_commitment_point) {
4169 return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
// Advance the counterparty commitment point pair with the freshly provided point.
4174 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
4175 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
4177 log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
4179 Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height, logger))
4182 pub fn update_add_htlc<F: Deref>(
4183 &mut self, msg: &msgs::UpdateAddHTLC, pending_forward_status: PendingHTLCStatus,
4184 fee_estimator: &LowerBoundedFeeEstimator<F>,
4185 ) -> Result<(), ChannelError> where F::Target: FeeEstimator {
4186 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4187 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
4189 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
4190 if self.context.channel_state.is_remote_shutdown_sent() {
4191 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
4193 if self.context.channel_state.is_peer_disconnected() {
4194 return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
4196 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
4197 return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
4199 if msg.amount_msat == 0 {
4200 return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
4202 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
4203 return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
4206 let dust_exposure_limiting_feerate = self.context.get_dust_exposure_limiting_feerate(&fee_estimator);
4207 let htlc_stats = self.context.get_pending_htlc_stats(None, dust_exposure_limiting_feerate);
4208 if htlc_stats.pending_inbound_htlcs + 1 > self.context.holder_max_accepted_htlcs as usize {
4209 return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
4211 if htlc_stats.pending_inbound_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
4212 return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
4215 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
4216 // the reserve_satoshis we told them to always have as direct payment so that they lose
4217 // something if we punish them for broadcasting an old state).
4218 // Note that we don't really care about having a small/no to_remote output in our local
4219 // commitment transactions, as the purpose of the channel reserve is to ensure we can
4220 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
4221 // present in the next commitment transaction we send them (at least for fulfilled ones,
4222 // failed ones won't modify value_to_self).
4223 // Note that we will send HTLCs which another instance of rust-lightning would think
4224 // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
4225 // Channel state once they will not be present in the next received commitment
4227 let mut removed_outbound_total_msat = 0;
4228 for ref htlc in self.context.pending_outbound_htlcs.iter() {
4229 if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
4230 removed_outbound_total_msat += htlc.amount_msat;
4231 } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
4232 removed_outbound_total_msat += htlc.amount_msat;
4236 let pending_value_to_self_msat =
4237 self.context.value_to_self_msat + htlc_stats.pending_inbound_htlcs_value_msat - removed_outbound_total_msat;
4238 let pending_remote_value_msat =
4239 self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
4240 if pending_remote_value_msat < msg.amount_msat {
4241 return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
4244 // Check that the remote can afford to pay for this HTLC on-chain at the current
4245 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
4247 let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
4248 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
4249 self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
4251 let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
4252 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
4256 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
4257 return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
4259 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
4260 return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
4264 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
4265 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
4269 if self.context.is_outbound() {
4270 // Check that they won't violate our local required channel reserve by adding this HTLC.
4271 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
4272 let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
4273 if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
4274 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
4277 if self.context.next_counterparty_htlc_id != msg.htlc_id {
4278 return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
4280 if msg.cltv_expiry >= 500000000 {
4281 return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
4284 if self.context.channel_state.is_local_shutdown_sent() {
4285 if let PendingHTLCStatus::Forward(_) = pending_forward_status {
4286 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
4290 // Now update local state:
4291 self.context.next_counterparty_htlc_id += 1;
4292 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
4293 htlc_id: msg.htlc_id,
4294 amount_msat: msg.amount_msat,
4295 payment_hash: msg.payment_hash,
4296 cltv_expiry: msg.cltv_expiry,
4297 state: InboundHTLCState::RemoteAnnounced(InboundHTLCResolution::Resolved {
4298 pending_htlc_status: pending_forward_status
4304 /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
4306 fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
4307 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
4308 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
4309 if htlc.htlc_id == htlc_id {
4310 let outcome = match check_preimage {
4311 None => fail_reason.into(),
4312 Some(payment_preimage) => {
4313 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
4314 if payment_hash != htlc.payment_hash {
4315 return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
4317 OutboundHTLCOutcome::Success(Some(payment_preimage))
4321 OutboundHTLCState::LocalAnnounced(_) =>
4322 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
4323 OutboundHTLCState::Committed => {
4324 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
4326 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
4327 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
4332 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
4335 pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64, Option<u64>), ChannelError> {
4336 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4337 return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
4339 if self.context.channel_state.is_peer_disconnected() {
4340 return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
4343 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat, htlc.skimmed_fee_msat))
4346 pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
4347 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4348 return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
4350 if self.context.channel_state.is_peer_disconnected() {
4351 return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
4354 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
4358 pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
4359 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4360 return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
4362 if self.context.channel_state.is_peer_disconnected() {
4363 return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
4366 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
// Handles a counterparty `commitment_signed`: verifies their signature over our next
// local commitment transaction and over every non-dust HTLC transaction, validates
// the result with our signer, then advances local state and queues a
// `ChannelMonitorUpdate` carrying the new holder commitment. Returns the (possibly
// blocked) monitor update, or `ChannelError::Close` if any check fails.
4370 pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
4371 where L::Target: Logger
// Guard clauses: channel must be operational, peer connected (i.e. not awaiting a
// channel_reestablish), and we must not already be exchanging closing_signed fees.
4373 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4374 return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
4376 if self.context.channel_state.is_peer_disconnected() {
4377 return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
4379 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
4380 return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
// Rebuild the holder commitment transaction locally so we can check the
// counterparty's signature against the exact sighash we expect.
4383 let funding_script = self.context.get_funding_redeemscript();
4385 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
4387 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
4388 let commitment_txid = {
4389 let trusted_tx = commitment_stats.tx.trust();
4390 let bitcoin_tx = trusted_tx.built_transaction();
4391 let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
4393 log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
4394 log_bytes!(msg.signature.serialize_compact()[..]),
4395 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
4396 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
// Verify the counterparty's ECDSA signature over the full commitment transaction.
4397 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
4398 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
// Clone the included HTLCs (with their sources) so we can pair them with the
// counterparty's per-HTLC signatures below.
4402 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
4404 // If our counterparty updated the channel fee in this commitment transaction, check that
4405 // they can actually afford the new fee now.
4406 let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
4407 update_state == FeeUpdateState::RemoteAnnounced
// Only the channel funder may send update_fee, so if one is pending here we must be
// the non-funder; ensure their balance covers the new fee plus the reserve we require.
4410 debug_assert!(!self.context.is_outbound());
4411 let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
4412 if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
4413 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
// Test/fuzzing-only sanity check: if we cached a projected local commitment fee
// earlier, confirm the real fee matches it when the channel state is unchanged.
4416 #[cfg(any(test, fuzzing))]
4418 if self.context.is_outbound() {
4419 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
4420 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
4421 if let Some(info) = projected_commit_tx_info {
4422 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
4423 + self.context.holding_cell_htlc_updates.len();
4424 if info.total_pending_htlcs == total_pending_htlcs
4425 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
4426 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
4427 && info.feerate == self.context.feerate_per_kw {
4428 assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
// The peer must provide exactly one signature per non-dust HTLC output.
4434 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
4435 return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
4438 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
4439 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
4440 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
4441 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
4442 // backwards compatibility, we never use it in production. To provide test coverage, here,
4443 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
4444 #[allow(unused_assignments, unused_mut)]
4445 let mut separate_nondust_htlc_sources = false;
4446 #[cfg(all(feature = "std", any(test, fuzzing)))] {
4447 use core::hash::{BuildHasher, Hasher};
4448 // Get a random value using the only std API to do so - the DefaultHasher
4449 let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
4450 separate_nondust_htlc_sources = rand_val % 2 == 0;
4453 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
4454 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
// For each HTLC with an actual output (non-dust), rebuild its second-stage
// transaction and verify the counterparty's signature on it; dust HTLCs get no
// signature. HTLC sources go either inline or into the separate vec (see above).
4455 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
4456 if let Some(_) = htlc.transaction_output_index {
4457 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
4458 self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
4459 &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
4461 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
// Anchor channels sign HTLC txs with SINGLE|ANYONECANPAY so fees can be attached later.
4462 let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
4463 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).p2wsh_signature_hash(0, &htlc_redeemscript, htlc.to_bitcoin_amount(), htlc_sighashtype).unwrap()[..]);
4464 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
4465 log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
4466 encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
4467 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
4468 return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
4470 if !separate_nondust_htlc_sources {
4471 htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
4474 htlcs_and_sigs.push((htlc, None, source_opt.take()));
4476 if separate_nondust_htlc_sources {
4477 if let Some(source) = source_opt.take() {
4478 nondust_htlc_sources.push(source);
4481 debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
// Assemble the fully-signed holder commitment and let our signer validate it
// (last fallible step before we mutate channel state).
4484 let holder_commitment_tx = HolderCommitmentTransaction::new(
4485 commitment_stats.tx,
4487 msg.htlc_signatures.clone(),
4488 &self.context.get_holder_pubkeys().funding_pubkey,
4489 self.context.counterparty_funding_pubkey()
4492 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
4493 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
4495 // Update state now that we've passed all the can-fail calls...
// A remote-announced fee update is now locked in pending our revoke_and_ack.
4496 let mut need_commitment = false;
4497 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
4498 if *update_state == FeeUpdateState::RemoteAnnounced {
4499 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
4500 need_commitment = true;
// Promote freshly-announced inbound HTLCs, which are now committed on our side.
4504 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
4505 let htlc_resolution = if let &InboundHTLCState::RemoteAnnounced(ref resolution) = &htlc.state {
4506 Some(resolution.clone())
4508 if let Some(htlc_resolution) = htlc_resolution {
4509 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
4510 &htlc.payment_hash, &self.context.channel_id);
4511 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(htlc_resolution);
4512 need_commitment = true;
// Promote outbound HTLCs the peer removed, collecting claimed preimages for the
// monitor update.
4515 let mut claimed_htlcs = Vec::new();
4516 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
4517 if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
4518 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
4519 &htlc.payment_hash, &self.context.channel_id);
4520 // Grab the preimage, if it exists, instead of cloning
4521 let mut reason = OutboundHTLCOutcome::Success(None);
4522 mem::swap(outcome, &mut reason);
4523 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
4524 // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
4525 // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
4526 // have a `Success(None)` reason. In this case we could forget some HTLC
4527 // claims, but such an upgrade is unlikely and including claimed HTLCs here
4528 // fixes a bug which the user was exposed to on 0.0.104 when they started the
4530 claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
4532 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
4533 need_commitment = true;
// Build the ChannelMonitorUpdate carrying the new holder commitment tx and HTLC data.
4537 self.context.latest_monitor_update_id += 1;
4538 let mut monitor_update = ChannelMonitorUpdate {
4539 update_id: self.context.latest_monitor_update_id,
4540 counterparty_node_id: Some(self.context.counterparty_node_id),
4541 updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
4542 commitment_tx: holder_commitment_tx,
4543 htlc_outputs: htlcs_and_sigs,
4545 nondust_htlc_sources,
4547 channel_id: Some(self.context.channel_id()),
// Advance our local commitment number; we now owe the peer a revoke_and_ack.
4550 self.context.cur_holder_commitment_transaction_number -= 1;
4551 self.context.expecting_peer_commitment_signed = false;
4552 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
4553 // build_commitment_no_status_check() next which will reset this to RAAFirst.
4554 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
4556 if self.context.channel_state.is_monitor_update_in_progress() {
4557 // In case we initially failed monitor updating without requiring a response, we need
4558 // to make sure the RAA gets sent first.
4559 self.context.monitor_pending_revoke_and_ack = true;
4560 if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
4561 // If we were going to send a commitment_signed after the RAA, go ahead and do all
4562 // the corresponding HTLC status updates so that
4563 // get_last_commitment_update_for_send includes the right HTLCs.
4564 self.context.monitor_pending_commitment_signed = true;
4565 let mut additional_update = self.build_commitment_no_status_check(logger);
4566 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
4567 // strictly increasing by one, so decrement it here.
4568 self.context.latest_monitor_update_id = monitor_update.update_id;
4569 monitor_update.updates.append(&mut additional_update.updates);
4571 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
4572 &self.context.channel_id);
4573 return Ok(self.push_ret_blockable_mon_update(monitor_update));
// Normal path: fold any follow-up commitment into the same monitor update and
// pause further updates until the monitor persists.
4576 let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
4577 // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
4578 // we'll send one right away when we get the revoke_and_ack when we
4579 // free_holding_cell_htlcs().
4580 let mut additional_update = self.build_commitment_no_status_check(logger);
4581 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
4582 // strictly increasing by one, so decrement it here.
4583 self.context.latest_monitor_update_id = monitor_update.update_id;
4584 monitor_update.updates.append(&mut additional_update.updates);
4588 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
4589 &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
4590 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
4591 return Ok(self.push_ret_blockable_mon_update(monitor_update));
4594 /// Public version of the below, checking relevant preconditions first.
4595 /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
4596 /// returns `(None, Vec::new())`.
4597 pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
4598 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
4599 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
4600 where F::Target: FeeEstimator, L::Target: Logger
4602 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.channel_state.can_generate_new_commitment() {
4603 self.free_holding_cell_htlcs(fee_estimator, logger)
4604 } else { (None, Vec::new()) }
4607 /// Frees any pending commitment updates in the holding cell, generating the relevant messages
4608 /// for our counterparty.
// Drains `holding_cell_htlc_updates` (adds, claims, fails) plus any held fee update,
// replaying each against current channel state, then builds a single commitment and
// monitor update covering all of them. Returns the (possibly blocked) monitor update
// and the list of HTLCs that could not be re-added and must be failed backwards.
4609 fn free_holding_cell_htlcs<F: Deref, L: Deref>(
4610 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
4611 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
4612 where F::Target: FeeEstimator, L::Target: Logger
// Caller (maybe_free_holding_cell_htlcs) ensures no monitor update is in flight.
4614 assert!(!self.context.channel_state.is_monitor_update_in_progress());
4615 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
4616 log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
4617 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
// Accumulator monitor update; the id is finalized after the commitment is built below.
4619 let mut monitor_update = ChannelMonitorUpdate {
4620 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
4621 counterparty_node_id: Some(self.context.counterparty_node_id),
4622 updates: Vec::new(),
4623 channel_id: Some(self.context.channel_id()),
// Take the holding cell contents, leaving an empty vec behind (send_htlc et al. may
// push new entries back into it while we iterate).
4626 let mut htlc_updates = Vec::new();
4627 mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
4628 let mut update_add_count = 0;
4629 let mut update_fulfill_count = 0;
4630 let mut update_fail_count = 0;
4631 let mut htlcs_to_fail = Vec::new();
4632 for htlc_update in htlc_updates.drain(..) {
4633 // Note that this *can* fail, though it should be due to rather-rare conditions on
4634 // fee races with adding too many outputs which push our total payments just over
4635 // the limit. In case it's less rare than I anticipate, we may want to revisit
4636 // handling this case better and maybe fulfilling some of the HTLCs while attempting
4637 // to rebalance channels.
4638 let fail_htlc_res = match &htlc_update {
4639 &HTLCUpdateAwaitingACK::AddHTLC {
4640 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
4641 skimmed_fee_msat, blinding_point, ..
4643 match self.send_htlc(
4644 amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
4645 false, skimmed_fee_msat, blinding_point, fee_estimator, logger
4647 Ok(_) => update_add_count += 1,
4650 ChannelError::Ignore(ref msg) => {
4651 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
4652 // If we fail to send here, then this HTLC should
4653 // be failed backwards. Failing to send here
4654 // indicates that this HTLC may keep being put back
4655 // into the holding cell without ever being
4656 // successfully forwarded/failed/fulfilled, causing
4657 // our counterparty to eventually close on us.
4658 htlcs_to_fail.push((source.clone(), *payment_hash));
4661 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
4668 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
4669 // If an HTLC claim was previously added to the holding cell (via
4670 // `get_update_fulfill_htlc`, then generating the claim message itself must
4671 // not fail - any in between attempts to claim the HTLC will have resulted
4672 // in it hitting the holding cell again and we cannot change the state of a
4673 // holding cell HTLC from fulfill to anything else.
4674 let mut additional_monitor_update =
4675 if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
4676 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
4677 { monitor_update } else { unreachable!() };
4678 update_fulfill_count += 1;
4679 monitor_update.updates.append(&mut additional_monitor_update.updates);
4682 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
4683 Some(self.fail_htlc(htlc_id, err_packet.clone(), false, logger)
4684 .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
4686 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
4687 Some(self.fail_htlc(htlc_id, (sha256_of_onion, failure_code), false, logger)
4688 .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
// Both fail variants funnel their result here; only Ignore errors are tolerated.
4691 if let Some(res) = fail_htlc_res {
4693 Ok(fail_msg_opt) => {
4694 // If an HTLC failure was previously added to the holding cell (via
4695 // `queue_fail_{malformed_}htlc`) then generating the fail message itself must
4696 // not fail - we should never end up in a state where we double-fail
4697 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
4698 // for a full revocation before failing.
4699 debug_assert!(fail_msg_opt.is_some());
4700 update_fail_count += 1;
4702 Err(ChannelError::Ignore(_)) => {},
4704 panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
// Nothing actually got freed (and no fee update held): report only the failures.
4709 if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
4710 return (None, htlcs_to_fail);
4712 let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
4713 self.send_update_fee(feerate, false, fee_estimator, logger)
// Build one commitment covering everything freed above and merge its monitor
// update into ours, keeping update ids strictly increasing by one.
4718 let mut additional_update = self.build_commitment_no_status_check(logger);
4719 // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
4720 // but we want them to be strictly increasing by one, so reset it here.
4721 self.context.latest_monitor_update_id = monitor_update.update_id;
4722 monitor_update.updates.append(&mut additional_update.updates);
4724 log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
4725 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
4726 update_add_count, update_fulfill_count, update_fail_count);
4728 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
4729 (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
4735 /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
4736 /// commitment_signed message here in case we had pending outbound HTLCs to add which were
4737 /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
4738 /// generating an appropriate error *after* the channel state has been updated based on the
4739 /// revoke_and_ack message.
4740 pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
4741 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
4742 ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
4743 where F::Target: FeeEstimator, L::Target: Logger,
4745 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4746 return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
4748 if self.context.channel_state.is_peer_disconnected() {
4749 return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
4751 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
4752 return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
4755 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
4757 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
4758 if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
4759 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
4763 if !self.context.channel_state.is_awaiting_remote_revoke() {
4764 // Our counterparty seems to have burned their coins to us (by revoking a state when we
4765 // haven't given them a new commitment transaction to broadcast). We should probably
4766 // take advantage of this by updating our channel monitor, sending them an error, and
4767 // waiting for them to broadcast their latest (now-revoked claim). But, that would be a
4768 // lot of work, and there's some chance this is all a misunderstanding anyway.
4769 // We have to do *something*, though, since our signer may get mad at us for otherwise
4770 // jumping a remote commitment number, so best to just force-close and move on.
4771 return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
4774 #[cfg(any(test, fuzzing))]
4776 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
4777 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
4780 match &self.context.holder_signer {
4781 ChannelSignerType::Ecdsa(ecdsa) => {
4782 ecdsa.validate_counterparty_revocation(
4783 self.context.cur_counterparty_commitment_transaction_number + 1,
4785 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
4787 // TODO (taproot|arik)
4792 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
4793 .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
4794 self.context.latest_monitor_update_id += 1;
4795 let mut monitor_update = ChannelMonitorUpdate {
4796 update_id: self.context.latest_monitor_update_id,
4797 counterparty_node_id: Some(self.context.counterparty_node_id),
4798 updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
4799 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
4800 secret: msg.per_commitment_secret,
4802 channel_id: Some(self.context.channel_id()),
4805 // Update state now that we've passed all the can-fail calls...
4806 // (note that we may still fail to generate the new commitment_signed message, but that's
4807 // OK, we step the channel here and *then* if the new generation fails we can fail the
4808 // channel based on that, but stepping stuff here should be safe either way.
4809 self.context.channel_state.clear_awaiting_remote_revoke();
4810 self.context.sent_message_awaiting_response = None;
4811 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
4812 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
4813 self.context.cur_counterparty_commitment_transaction_number -= 1;
4815 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
4816 self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
4819 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
4820 let mut to_forward_infos = Vec::new();
4821 let mut pending_update_adds = Vec::new();
4822 let mut revoked_htlcs = Vec::new();
4823 let mut finalized_claimed_htlcs = Vec::new();
4824 let mut update_fail_htlcs = Vec::new();
4825 let mut update_fail_malformed_htlcs = Vec::new();
4826 let mut require_commitment = false;
4827 let mut value_to_self_msat_diff: i64 = 0;
4830 // Take references explicitly so that we can hold multiple references to self.context.
4831 let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
4832 let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
4833 let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;
4835 // We really shouldnt have two passes here, but retain gives a non-mutable ref (Rust bug)
4836 pending_inbound_htlcs.retain(|htlc| {
4837 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
4838 log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
4839 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
4840 value_to_self_msat_diff += htlc.amount_msat as i64;
4842 *expecting_peer_commitment_signed = true;
4846 pending_outbound_htlcs.retain(|htlc| {
4847 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
4848 log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
4849 if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
4850 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
4852 finalized_claimed_htlcs.push(htlc.source.clone());
4853 // They fulfilled, so we sent them money
4854 value_to_self_msat_diff -= htlc.amount_msat as i64;
4859 for htlc in pending_inbound_htlcs.iter_mut() {
4860 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
4862 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
4866 let mut state = InboundHTLCState::Committed;
4867 mem::swap(&mut state, &mut htlc.state);
4869 if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution) = state {
4870 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
4871 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution);
4872 require_commitment = true;
4873 } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution) = state {
4875 InboundHTLCResolution::Resolved { pending_htlc_status } =>
4876 match pending_htlc_status {
4877 PendingHTLCStatus::Fail(fail_msg) => {
4878 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
4879 require_commitment = true;
4881 HTLCFailureMsg::Relay(msg) => {
4882 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
4883 update_fail_htlcs.push(msg)
4885 HTLCFailureMsg::Malformed(msg) => {
4886 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
4887 update_fail_malformed_htlcs.push(msg)
4891 PendingHTLCStatus::Forward(forward_info) => {
4892 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed, attempting to forward", &htlc.payment_hash);
4893 to_forward_infos.push((forward_info, htlc.htlc_id));
4894 htlc.state = InboundHTLCState::Committed;
4897 InboundHTLCResolution::Pending { update_add_htlc } => {
4898 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
4899 pending_update_adds.push(update_add_htlc);
4900 htlc.state = InboundHTLCState::Committed;
4906 for htlc in pending_outbound_htlcs.iter_mut() {
4907 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
4908 log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
4909 htlc.state = OutboundHTLCState::Committed;
4910 *expecting_peer_commitment_signed = true;
4912 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
4913 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
4914 // Grab the preimage, if it exists, instead of cloning
4915 let mut reason = OutboundHTLCOutcome::Success(None);
4916 mem::swap(outcome, &mut reason);
4917 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
4918 require_commitment = true;
4922 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
4924 if let Some((feerate, update_state)) = self.context.pending_update_fee {
4925 match update_state {
4926 FeeUpdateState::Outbound => {
4927 debug_assert!(self.context.is_outbound());
4928 log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
4929 self.context.feerate_per_kw = feerate;
4930 self.context.pending_update_fee = None;
4931 self.context.expecting_peer_commitment_signed = true;
4933 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
4934 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
4935 debug_assert!(!self.context.is_outbound());
4936 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
4937 require_commitment = true;
4938 self.context.feerate_per_kw = feerate;
4939 self.context.pending_update_fee = None;
4944 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
4945 let release_state_str =
4946 if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
4947 macro_rules! return_with_htlcs_to_fail {
4948 ($htlcs_to_fail: expr) => {
4949 if !release_monitor {
4950 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
4951 update: monitor_update,
4953 return Ok(($htlcs_to_fail, None));
4955 return Ok(($htlcs_to_fail, Some(monitor_update)));
4960 self.context.monitor_pending_update_adds.append(&mut pending_update_adds);
4962 if self.context.channel_state.is_monitor_update_in_progress() {
4963 // We can't actually generate a new commitment transaction (incl by freeing holding
4964 // cells) while we can't update the monitor, so we just return what we have.
4965 if require_commitment {
4966 self.context.monitor_pending_commitment_signed = true;
4967 // When the monitor updating is restored we'll call
4968 // get_last_commitment_update_for_send(), which does not update state, but we're
4969 // definitely now awaiting a remote revoke before we can step forward any more, so
4971 let mut additional_update = self.build_commitment_no_status_check(logger);
4972 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
4973 // strictly increasing by one, so decrement it here.
4974 self.context.latest_monitor_update_id = monitor_update.update_id;
4975 monitor_update.updates.append(&mut additional_update.updates);
4977 self.context.monitor_pending_forwards.append(&mut to_forward_infos);
4978 self.context.monitor_pending_failures.append(&mut revoked_htlcs);
4979 self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
4980 log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
4981 return_with_htlcs_to_fail!(Vec::new());
4984 match self.free_holding_cell_htlcs(fee_estimator, logger) {
4985 (Some(mut additional_update), htlcs_to_fail) => {
4986 // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
4987 // strictly increasing by one, so decrement it here.
4988 self.context.latest_monitor_update_id = monitor_update.update_id;
4989 monitor_update.updates.append(&mut additional_update.updates);
4991 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
4992 &self.context.channel_id(), release_state_str);
4994 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
4995 return_with_htlcs_to_fail!(htlcs_to_fail);
4997 (None, htlcs_to_fail) => {
4998 if require_commitment {
4999 let mut additional_update = self.build_commitment_no_status_check(logger);
5001 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
5002 // strictly increasing by one, so decrement it here.
5003 self.context.latest_monitor_update_id = monitor_update.update_id;
5004 monitor_update.updates.append(&mut additional_update.updates);
5006 log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
5007 &self.context.channel_id(),
5008 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
5011 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
5012 return_with_htlcs_to_fail!(htlcs_to_fail);
5014 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
5015 &self.context.channel_id(), release_state_str);
5017 self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
5018 return_with_htlcs_to_fail!(htlcs_to_fail);
5024 /// Queues up an outbound update fee by placing it in the holding cell. You should call
5025 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
5026 /// commitment update.
5027 pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
5028 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
5029 where F::Target: FeeEstimator, L::Target: Logger
// We pass `force_holding_cell = true`, so `send_update_fee` must queue the update
// rather than returning an immediate `update_fee` message; a `Some` return here
// would indicate a bug in `send_update_fee`.
5031 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
5032 assert!(msg_opt.is_none(), "We forced holding cell?");
5035 /// Adds a pending update to this channel. See the doc for send_htlc for
5036 /// further details on the optionness of the return value.
5037 /// If our balance is too low to cover the cost of the next commitment transaction at the
5038 /// new feerate, the update is cancelled.
5040 /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
5041 /// [`Channel`] if `force_holding_cell` is false.
5042 fn send_update_fee<F: Deref, L: Deref>(
5043 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
5044 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5045 ) -> Option<msgs::UpdateFee>
5046 where F::Target: FeeEstimator, L::Target: Logger
// Preconditions: only the channel funder may send update_fee, and only on a
// usable, live channel. Violations are caller bugs, hence panics rather than Errs.
5048 if !self.context.is_outbound() {
5049 panic!("Cannot send fee from inbound channel");
5051 if !self.context.is_usable() {
5052 panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
5054 if !self.context.is_live() {
5055 panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
5058 // Before proposing a feerate update, check that we can actually afford the new fee.
5059 let dust_exposure_limiting_feerate = self.context.get_dust_exposure_limiting_feerate(&fee_estimator);
5060 let htlc_stats = self.context.get_pending_htlc_stats(Some(feerate_per_kw), dust_exposure_limiting_feerate);
5061 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
5062 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
// Budget for the worst case: all current non-dust HTLCs, plus any outbound HTLCs
// still in the holding cell, plus a buffer of concurrent inbound HTLCs the peer
// may add before seeing our update_fee. `commit_tx_fee_sat` returns sats; * 1000 -> msat.
5063 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + htlc_stats.on_holder_tx_outbound_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
5064 let holder_balance_msat = commitment_stats.local_balance_msat - htlc_stats.outbound_holding_cell_msat;
// We must retain the counterparty-selected reserve on top of the fee buffer.
// NOTE(review): the `unwrap()` assumes the reserve is always set by this point in
// the channel's life — confirm against the channel establishment path.
5065 if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
5066 //TODO: auto-close after a number of failures?
5067 log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
5071 // Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
5072 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(dust_exposure_limiting_feerate);
// Decline the update (per the doc above, a cancelled update returns no message)
// if the new feerate would push dust exposure over the limit on either side's tx.
5073 if htlc_stats.on_holder_tx_dust_exposure_msat > max_dust_htlc_exposure_msat {
5074 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
5077 if htlc_stats.on_counterparty_tx_dust_exposure_msat > max_dust_htlc_exposure_msat {
5078 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
// While awaiting the peer's revoke_and_ack or a monitor update we cannot send
// further commitment-affecting updates, so fall back to the holding cell.
5082 if self.context.channel_state.is_awaiting_remote_revoke() || self.context.channel_state.is_monitor_update_in_progress() {
5083 force_holding_cell = true;
5086 if force_holding_cell {
5087 self.context.holding_cell_update_fee = Some(feerate_per_kw);
// Record the outbound fee update as pending and hand the caller the message to send.
5091 debug_assert!(self.context.pending_update_fee.is_none());
5092 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
5094 Some(msgs::UpdateFee {
5095 channel_id: self.context.channel_id,
5100 /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
5101 /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
5103 /// No further message handling calls may be made until a channel_reestablish dance has
5105 /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
5106 pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
5107 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
5108 if self.context.channel_state.is_pre_funded_state() {
5112 if self.context.channel_state.is_peer_disconnected() {
5113 // While the below code should be idempotent, it's simpler to just return early, as
5114 // redundant disconnect events can fire, though they should be rare.
// Announcement signatures we sent (but which weren't committed on both sides) must
// be re-sent after reconnection, so roll the state back to NotSent.
5118 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
5119 self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
5122 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
5123 // will be retransmitted.
5124 self.context.last_sent_closing_fee = None;
5125 self.context.pending_counterparty_closing_signed = None;
5126 self.context.closing_fee_limits = None;
// Drop inbound HTLCs the peer announced but never committed; they'll re-send them
// after reconnection if still relevant.
5128 let mut inbound_drop_count = 0;
5129 self.context.pending_inbound_htlcs.retain(|htlc| {
5131 InboundHTLCState::RemoteAnnounced(_) => {
5132 // They sent us an update_add_htlc but we never got the commitment_signed.
5133 // We'll tell them what commitment_signed we're expecting next and they'll drop
5134 // this HTLC accordingly
5135 inbound_drop_count += 1;
5138 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
5139 // We received a commitment_signed updating this HTLC and (at least hopefully)
5140 // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
5141 // in response to it yet, so don't touch it.
5144 InboundHTLCState::Committed => true,
5145 InboundHTLCState::LocalRemoved(_) => {
5146 // We (hopefully) sent a commitment_signed updating this HTLC (which we can
5147 // re-transmit if needed) and they may have even sent a revoke_and_ack back
5148 // (that we missed). Keep this around for now and if they tell us they missed
5149 // the commitment_signed we can re-transmit the update then.
// Rewind the counterparty HTLC id counter past the dropped announcements so the
// ids line up again when the peer re-sends them.
5154 self.context.next_counterparty_htlc_id -= inbound_drop_count;
// An inbound fee update the peer announced but never committed is likewise dropped;
// they will re-send it post-reconnect if they still want it.
5156 if let Some((_, update_state)) = self.context.pending_update_fee {
5157 if update_state == FeeUpdateState::RemoteAnnounced {
5158 debug_assert!(!self.context.is_outbound());
5159 self.context.pending_update_fee = None;
5163 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
5164 if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
5165 // They sent us an update to remove this but haven't yet sent the corresponding
5166 // commitment_signed, we need to move it back to Committed and they can re-send
5167 // the update upon reconnection.
5168 htlc.state = OutboundHTLCState::Committed;
5172 self.context.sent_message_awaiting_response = None;
// Mark the channel paused; message handling resumes only after channel_reestablish.
5174 self.context.channel_state.set_peer_disconnected();
5175 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
5179 /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
5180 /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
5181 /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
5182 /// update completes (potentially immediately).
5183 /// The messages which were generated with the monitor update must *not* have been sent to the
5184 /// remote end, and must instead have been dropped. They will be regenerated when
5185 /// [`Self::monitor_updating_restored`] is called.
5187 /// [`ChannelManager`]: super::channelmanager::ChannelManager
5188 /// [`chain::Watch`]: crate::chain::Watch
5189 /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
5190 fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
5191 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
5192 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
5193 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
// Accumulate (|=) rather than overwrite the resend flags — pausing may happen more
// than once before the monitor update completes and earlier requests must survive.
5195 self.context.monitor_pending_revoke_and_ack |= resend_raa;
5196 self.context.monitor_pending_commitment_signed |= resend_commitment;
5197 self.context.monitor_pending_channel_ready |= resend_channel_ready;
// Stash the HTLC resolutions to be surfaced once the monitor update completes.
5198 self.context.monitor_pending_forwards.append(&mut pending_forwards);
5199 self.context.monitor_pending_failures.append(&mut pending_fails);
5200 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
5201 self.context.channel_state.set_monitor_update_in_progress();
5204 /// Indicates that the latest ChannelMonitor update has been committed by the client
5205 /// successfully and we should restore normal operation. Returns messages which should be sent
5206 /// to the remote side.
5207 pub fn monitor_updating_restored<L: Deref, NS: Deref>(
5208 &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
5209 user_config: &UserConfig, best_block_height: u32
5210 ) -> MonitorRestoreUpdates
5213 NS::Target: NodeSigner
// Caller bug if we weren't actually paused; clear the in-progress flag first so the
// state queries below reflect normal operation.
5215 assert!(self.context.channel_state.is_monitor_update_in_progress());
5216 self.context.channel_state.clear_monitor_update_in_progress();
5218 // If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to
5219 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
5220 // first received the funding_signed.
5221 let mut funding_broadcastable =
5222 if self.context.is_outbound() &&
5223 (matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
5224 matches!(self.context.channel_state, ChannelState::ChannelReady(_)))
5226 self.context.funding_transaction.take()
5228 // That said, if the funding transaction is already confirmed (ie we're active with a
5229 // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
5230 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.minimum_depth != Some(0) {
5231 funding_broadcastable = None;
5234 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
5235 // (and we assume the user never directly broadcasts the funding transaction and waits for
5236 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
5237 // * an inbound channel that failed to persist the monitor on funding_created and we got
5238 // the funding transaction confirmed before the monitor was persisted, or
5239 // * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
5240 let channel_ready = if self.context.monitor_pending_channel_ready {
5241 assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
5242 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
5243 self.context.monitor_pending_channel_ready = false;
5244 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
5245 Some(msgs::ChannelReady {
5246 channel_id: self.context.channel_id(),
5247 next_per_commitment_point,
5248 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5252 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
// Drain everything queued by monitor_updating_paused via mem::swap (moves the Vecs
// out and leaves fresh empty ones, without cloning).
5254 let mut accepted_htlcs = Vec::new();
5255 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
5256 let mut failed_htlcs = Vec::new();
5257 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
5258 let mut finalized_claimed_htlcs = Vec::new();
5259 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
5260 let mut pending_update_adds = Vec::new();
5261 mem::swap(&mut pending_update_adds, &mut self.context.monitor_pending_update_adds);
// If the peer is disconnected we can't send RAA/commitment_signed now — they will be
// regenerated during channel_reestablish — so drop those flags and return early with
// only the HTLC resolutions (and possibly funding/channel_ready/announcement data).
5263 if self.context.channel_state.is_peer_disconnected() {
5264 self.context.monitor_pending_revoke_and_ack = false;
5265 self.context.monitor_pending_commitment_signed = false;
5266 return MonitorRestoreUpdates {
5267 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
5268 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, pending_update_adds,
5269 funding_broadcastable, channel_ready, announcement_sigs
// Regenerate the messages we dropped when the update was paused, preserving the
// recorded RAA-vs-commitment resend order.
5273 let raa = if self.context.monitor_pending_revoke_and_ack {
5274 Some(self.get_last_revoke_and_ack())
5276 let commitment_update = if self.context.monitor_pending_commitment_signed {
5277 self.get_last_commitment_update_for_send(logger).ok()
5279 if commitment_update.is_some() {
5280 self.mark_awaiting_response();
5283 self.context.monitor_pending_revoke_and_ack = false;
5284 self.context.monitor_pending_commitment_signed = false;
5285 let order = self.context.resend_order.clone();
5286 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
5287 &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
5288 if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
5289 match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
5290 MonitorRestoreUpdates {
5291 raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs,
5292 pending_update_adds, funding_broadcastable, channel_ready, announcement_sigs
/// Handles an inbound `update_fee` message from the counterparty (the channel funder),
/// recording it as a remote-announced pending fee update.
///
/// Errors (closing the channel) if we are the funder, if the peer is marked
/// disconnected (a `channel_reestablish` is required first), if the proposed feerate
/// fails [`Channel::check_remote_fee`], or if the new feerate would push our
/// dust-in-flight exposure over the configured limit on either commitment transaction.
5296 pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
5297 where F::Target: FeeEstimator, L::Target: Logger
5299 if self.context.is_outbound() {
5300 return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
5302 if self.context.channel_state.is_peer_disconnected() {
5303 return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
5305 Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
// Record the update as remote-announced; it only takes effect once committed via the
// usual commitment_signed/revoke_and_ack dance.
5307 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
5308 self.context.update_time_counter += 1;
5309 // Check that we won't be pushed over our dust exposure limit by the feerate increase.
5310 let dust_exposure_limiting_feerate = self.context.get_dust_exposure_limiting_feerate(&fee_estimator);
5311 let htlc_stats = self.context.get_pending_htlc_stats(None, dust_exposure_limiting_feerate);
5312 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(dust_exposure_limiting_feerate);
5313 if htlc_stats.on_holder_tx_dust_exposure_msat > max_dust_htlc_exposure_msat {
5314 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
5315 msg.feerate_per_kw, htlc_stats.on_holder_tx_dust_exposure_msat)));
5317 if htlc_stats.on_counterparty_tx_dust_exposure_msat > max_dust_htlc_exposure_msat {
5318 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
5319 msg.feerate_per_kw, htlc_stats.on_counterparty_tx_dust_exposure_msat)));
5324 /// Indicates that the signer may have some signatures for us, so we should retry if we're
5326 #[cfg(async_signing)]
5327 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
// Retry each message generation that was previously blocked on the async signer.
5328 let commitment_update = if self.context.signer_pending_commitment_update {
5329 self.get_last_commitment_update_for_send(logger).ok()
// funding_signed is only ever sent by the non-funder (inbound) side.
5331 let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
5332 self.context.get_funding_signed_msg(logger).1
// Once funding_signed is available we may also be able to send channel_ready
// (height 0 here — NOTE(review): presumably 0-conf only; confirm check_get_channel_ready semantics).
5334 let channel_ready = if funding_signed.is_some() {
5335 self.check_get_channel_ready(0)
5338 log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
5339 if commitment_update.is_some() { "a" } else { "no" },
5340 if funding_signed.is_some() { "a" } else { "no" },
5341 if channel_ready.is_some() { "a" } else { "no" });
5343 SignerResumeUpdates {
/// Rebuilds the `revoke_and_ack` message for our most recently revoked commitment,
/// for retransmission (e.g. after a monitor-update pause or reconnection).
5350 fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
5351 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
// Commitment numbers count downward, so `+ 2` releases the secret for the
// commitment before the one we most recently built — i.e. the one already revoked.
5352 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
5353 msgs::RevokeAndACK {
5354 channel_id: self.context.channel_id,
5355 per_commitment_secret,
5356 next_per_commitment_point,
// Taproot-channel nonce; not applicable here.
5358 next_local_nonce: None,
5362 /// Gets the last commitment update for immediate sending to our peer.
5363 fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
5364 let mut update_add_htlcs = Vec::new();
5365 let mut update_fulfill_htlcs = Vec::new();
5366 let mut update_fail_htlcs = Vec::new();
5367 let mut update_fail_malformed_htlcs = Vec::new();
// Regenerate update_add_htlc for every outbound HTLC we announced but which the
// peer has not yet irrevocably committed.
5369 for htlc in self.context.pending_outbound_htlcs.iter() {
5370 if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
5371 update_add_htlcs.push(msgs::UpdateAddHTLC {
5372 channel_id: self.context.channel_id(),
5373 htlc_id: htlc.htlc_id,
5374 amount_msat: htlc.amount_msat,
5375 payment_hash: htlc.payment_hash,
5376 cltv_expiry: htlc.cltv_expiry,
5377 onion_routing_packet: (**onion_packet).clone(),
5378 skimmed_fee_msat: htlc.skimmed_fee_msat,
5379 blinding_point: htlc.blinding_point,
// Regenerate the fail/fulfill message for every inbound HTLC we have locally
// removed, based on the recorded removal reason.
5384 for htlc in self.context.pending_inbound_htlcs.iter() {
5385 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
5387 &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
5388 update_fail_htlcs.push(msgs::UpdateFailHTLC {
5389 channel_id: self.context.channel_id(),
5390 htlc_id: htlc.htlc_id,
5391 reason: err_packet.clone()
5394 &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
5395 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
5396 channel_id: self.context.channel_id(),
5397 htlc_id: htlc.htlc_id,
5398 sha256_of_onion: sha256_of_onion.clone(),
5399 failure_code: failure_code.clone(),
5402 &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
5403 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
5404 channel_id: self.context.channel_id(),
5405 htlc_id: htlc.htlc_id,
5406 payment_preimage: payment_preimage.clone(),
// Only the funder (outbound side) ever sends update_fee.
5413 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
5414 Some(msgs::UpdateFee {
5415 channel_id: self.context.channel_id(),
5416 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
5420 log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
5421 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
5422 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
// Re-sign the commitment. On success, clear the pending-signer flag; on failure,
// without async signing this is fatal, with async signing we mark the update as
// awaiting the signer and retry later via signer_maybe_unblocked.
5423 let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
5424 if self.context.signer_pending_commitment_update {
5425 log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
5426 self.context.signer_pending_commitment_update = false;
5430 #[cfg(not(async_signing))] {
5431 panic!("Failed to get signature for new commitment state");
5433 #[cfg(async_signing)] {
5434 if !self.context.signer_pending_commitment_update {
5435 log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
5436 self.context.signer_pending_commitment_update = true;
5441 Ok(msgs::CommitmentUpdate {
5442 update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
5447 /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
5448 pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
// Only retransmit shutdown if we already sent one; by then the shutdown script
// must have been set, so the assert guards an internal invariant.
5449 if self.context.channel_state.is_local_shutdown_sent() {
5450 assert!(self.context.shutdown_scriptpubkey.is_some());
5451 Some(msgs::Shutdown {
5452 channel_id: self.context.channel_id,
5453 scriptpubkey: self.get_closing_scriptpubkey(),
5458 /// May panic if some calls other than message-handling calls (which will all Err immediately)
5459 /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
5461 /// Some links printed in log lines are included here to check them during build (when run with
5462 /// `cargo doc --document-private-items`):
5463 /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
5464 /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
5465 pub fn channel_reestablish<L: Deref, NS: Deref>(
5466 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
5467 chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
5468 ) -> Result<ReestablishResponses, ChannelError>
5471 NS::Target: NodeSigner
// A channel_reestablish is only valid immediately after a reconnection, i.e. while we still
// consider the peer disconnected. Receiving one at any other time almost certainly means the
// two sides' views have diverged, so we close rather than attempt recovery.
5473 if !self.context.channel_state.is_peer_disconnected() {
5474 // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
5475 // almost certainly indicates we are going to end up out-of-sync in some way, so we
5476 // just close here instead of trying to recover.
5477 return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
// Sanity-check the peer's claimed commitment numbers. Commitment numbers count *down* from
// INITIAL_COMMITMENT_NUMBER, so values at/above it (or a zero next_local_commitment_number)
// are nonsensical and treated as a non-standard force-close attempt.
5480 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
5481 msg.next_local_commitment_number == 0 {
5482 return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
// Our view of how many of our own commitment transactions the counterparty has seen/revoked,
// converted from the countdown-style cur_holder_commitment_transaction_number. Compared below
// against the peer's next_remote_commitment_number.
5485 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
// If the peer claims to have seen at least one of our commitments, it must prove it by
// presenting the per-commitment secret for the state it last revoked; we check the secret
// against the point our signer would have produced at that height.
5486 if msg.next_remote_commitment_number > 0 {
5487 let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
5488 let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
5489 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
5490 if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
5491 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
// Data-loss protection: the peer has (provably, given the secret check above) seen a newer
// state of ours than we know about. Broadcasting our stale state would forfeit our funds,
// so we log detailed recovery instructions and panic rather than continue.
5493 if msg.next_remote_commitment_number > our_commitment_transaction {
5494 macro_rules! log_and_panic {
5495 ($err_msg: expr) => {
5496 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
5497 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
5500 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
5501 This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
5502 More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
5503 If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
5504 ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
5505 ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
5506 Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
5507 See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
5511 // Before we change the state of the channel, we check if the peer is sending a very old
5512 // commitment transaction number, if yes we send a warning message.
5513 if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
5514 return Err(ChannelError::Warn(format!(
5515 "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
5516 msg.next_remote_commitment_number,
5517 our_commitment_transaction
5521 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
5522 // remaining cases either succeed or ErrorMessage-fail).
5523 self.context.channel_state.clear_peer_disconnected();
5524 self.context.sent_message_awaiting_response = None;
5526 let shutdown_msg = self.get_outbound_shutdown();
5528 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height, logger);
// Special-case channels still awaiting channel_ready: there is nothing to resend but
// (possibly) our own channel_ready message.
5530 if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
5531 // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
5532 if !self.context.channel_state.is_our_channel_ready() ||
5533 self.context.channel_state.is_monitor_update_in_progress() {
5534 if msg.next_remote_commitment_number != 0 {
5535 return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
5537 // Short circuit the whole handler as there is nothing we can resend them
5538 return Ok(ReestablishResponses {
5539 channel_ready: None,
5540 raa: None, commitment_update: None,
5541 order: RAACommitmentOrder::CommitmentFirst,
5542 shutdown_msg, announcement_sigs,
5546 // We have OurChannelReady set!
5547 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
5548 return Ok(ReestablishResponses {
5549 channel_ready: Some(msgs::ChannelReady {
5550 channel_id: self.context.channel_id(),
5551 next_per_commitment_point,
5552 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5554 raa: None, commitment_update: None,
5555 order: RAACommitmentOrder::CommitmentFirst,
5556 shutdown_msg, announcement_sigs,
// Decide whether we must re-send our last revoke_and_ack: if the peer's
// next_remote_commitment_number is exactly one behind our view, our RAA was lost in flight.
5560 let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
5561 // Remote isn't waiting on any RevokeAndACK from us!
5562 // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
5564 } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
5565 if self.context.channel_state.is_monitor_update_in_progress() {
// Monitor update in flight — queue the RAA for when the update completes rather
// than re-sending it immediately.
5566 self.context.monitor_pending_revoke_and_ack = true;
5569 Some(self.get_last_revoke_and_ack())
// Any other value was ruled out by the earlier sanity/data-loss checks; hitting this
// branch indicates a logic error, hence the debug_assert before the defensive Close.
5572 debug_assert!(false, "All values should have been handled in the four cases above");
5573 return Err(ChannelError::Close(format!(
5574 "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
5575 msg.next_remote_commitment_number,
5576 our_commitment_transaction
5580 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
5581 // revoke_and_ack, not on sending commitment_signed, so we add one if have
5582 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
5583 // the corresponding revoke_and_ack back yet.
5584 let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke();
5585 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
// We'll be waiting on the peer's revoke_and_ack — start the response timeout clock.
5586 self.mark_awaiting_response();
5588 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
// Re-send channel_ready if the peer has seen none of our commitment updates and we are
// still on the very first commitment (i.e. our channel_ready may have been lost).
5590 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
5591 // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
5592 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
5593 Some(msgs::ChannelReady {
5594 channel_id: self.context.channel_id(),
5595 next_per_commitment_point,
5596 short_channel_id_alias: Some(self.context.outbound_scid_alias),
// Peer is fully up to date with our commitment_signed messages — nothing to resend
// beyond the (optional) RAA computed above.
5600 if msg.next_local_commitment_number == next_counterparty_commitment_number {
5601 if required_revoke.is_some() {
5602 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
5604 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
5607 Ok(ReestablishResponses {
5608 channel_ready, shutdown_msg, announcement_sigs,
5609 raa: required_revoke,
5610 commitment_update: None,
5611 order: self.context.resend_order.clone(),
// Peer lost our last commitment_signed — re-send it, unless a monitor update is in
// flight, in which case we queue it for after the update completes.
5613 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
5614 if required_revoke.is_some() {
5615 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
5617 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
5620 if self.context.channel_state.is_monitor_update_in_progress() {
5621 self.context.monitor_pending_commitment_signed = true;
5622 Ok(ReestablishResponses {
5623 channel_ready, shutdown_msg, announcement_sigs,
5624 commitment_update: None, raa: None,
5625 order: self.context.resend_order.clone(),
5628 Ok(ReestablishResponses {
5629 channel_ready, shutdown_msg, announcement_sigs,
5630 raa: required_revoke,
5631 commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
5632 order: self.context.resend_order.clone(),
// Remaining cases: the peer's claimed next commitment number is too far in the past or
// in the future relative to our state — either way the channel is unrecoverable.
5635 } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
5636 Err(ChannelError::Close(format!(
5637 "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
5638 msg.next_local_commitment_number,
5639 next_counterparty_commitment_number,
5642 Err(ChannelError::Close(format!(
5643 "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
5644 msg.next_local_commitment_number,
5645 next_counterparty_commitment_number,
5650 /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
5651 /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
5652 /// at which point they will be recalculated.
5653 fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
5655 where F::Target: FeeEstimator
// Once computed, the limits are cached in context.closing_fee_limits so repeated negotiation
// steps see a stable (min, max) range for the lifetime of this connection.
5657 if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
5659 // Propose a range from our current Background feerate to our Normal feerate plus our
5660 // force_close_avoidance_max_fee_satoshis.
5661 // If we fail to come to consensus, we'll have to force-close.
5662 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
5663 // Use NonAnchorChannelFee because this should be an estimate for a channel close
5664 // that we don't expect to need fee bumping
5665 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
// Only the funder (outbound side) pays the closing fee, so only then does the normal feerate
// cap apply; otherwise we place no feerate ceiling of our own.
5666 let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
5668 // The spec requires that (when the channel does not have anchors) we only send absolute
5669 // channel fees no greater than the absolute channel fee on the current commitment
5670 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
5671 // very good reason to apply such a limit in any case. We don't bother doing so, risking
5672 // some force-closure by old nodes, but we wanted to close the channel anyway.
// If the user configured a target closing feerate, raise both our proposal and max to at
// least that (bounded by the commitment feerate when we aren't the funder).
5674 if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
5675 let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
5676 proposed_feerate = cmp::max(proposed_feerate, min_feerate);
5677 proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
5680 // Note that technically we could end up with a lower minimum fee if one sides' balance is
5681 // below our dust limit, causing the output to disappear. We don't bother handling this
5682 // case, however, as this should only happen if a channel is closed before any (material)
5683 // payments have been made on it. This may cause slight fee overpayment and/or failure to
5684 // come to consensus with our counterparty on appropriate fees, however it should be a
5685 // relatively rare case. We can revisit this later, though note that in order to determine
5686 // if the funders' output is dust we have to know the absolute fee we're going to use.
5687 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
5688 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
5689 let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
5690 // We always add force_close_avoidance_max_fee_satoshis to our normal
5691 // feerate-calculated fee, but allow the max to be overridden if we're using a
5692 // target feerate-calculated fee.
5693 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
5694 proposed_max_feerate as u64 * tx_weight / 1000)
// Not the funder: the counterparty pays the fee, so our max is everything that isn't ours
// (our msat balance rounded *up* to whole sats before subtracting).
5696 self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
// Cache the computed range and return it.
5699 self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
5700 self.context.closing_fee_limits.clone().unwrap()
5703 /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
5704 /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
5705 /// this point if we're the funder we should send the initial closing_signed, and in any case
5706 /// shutdown should complete within a reasonable timeframe.
5707 fn closing_negotiation_ready(&self) -> bool {
// Pure delegation: the readiness condition lives on the shared ChannelContext.
5708 self.context.closing_negotiation_ready()
5711 /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
5712 /// an Err if no progress is being made and the channel should be force-closed instead.
5713 /// Should be called on a one-minute timer.
5714 pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
5715 if self.closing_negotiation_ready() {
// closing_signed_in_flight was set on a previous timer tick; negotiation spanning two
// ticks without completing is treated as stalled and the channel is force-closed.
5716 if self.context.closing_signed_in_flight {
5717 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
// First tick with negotiation ready: start the clock for the next tick's check.
5719 self.context.closing_signed_in_flight = true;
/// Attempts to kick off (or continue) `closing_signed` fee negotiation, returning our
/// `closing_signed` message, a fully-signed closing transaction (if negotiation completed),
/// and/or a [`ShutdownResult`] as applicable. Returns `(None, None, None)` whenever
/// negotiation cannot proceed yet (pending messages, pending commitment_signed, or we
/// already have an outstanding proposal).
5725 pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
5726 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
5727 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
5728 where F::Target: FeeEstimator, L::Target: Logger
5730 // If we're waiting on a monitor persistence, that implies we're also waiting to send some
5731 // message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
5732 // initiate `closing_signed` negotiation until we're clear of all pending messages. Note
5733 // that closing_negotiation_ready checks this case (as well as a few others).
5734 if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
5735 return Ok((None, None, None));
// Only the funder (outbound side) opens fee negotiation. If we're not the funder, just
// process any counterparty closing_signed we had to defer during a monitor update.
5738 if !self.context.is_outbound() {
5739 if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
5740 return self.closing_signed(fee_estimator, &msg);
5742 return Ok((None, None, None));
5745 // If we're waiting on a counterparty `commitment_signed` to clear some updates from our
5746 // local commitment transaction, we can't yet initiate `closing_signed` negotiation.
5747 if self.context.expecting_peer_commitment_signed {
5748 return Ok((None, None, None));
5751 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
5753 assert!(self.context.shutdown_scriptpubkey.is_some());
// Open with our minimum fee; the counterparty can pick anything within the advertised range.
5754 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
5755 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
5756 our_min_fee, our_max_fee, total_fee_satoshis);
5758 match &self.context.holder_signer {
5759 ChannelSignerType::Ecdsa(ecdsa) => {
5761 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
5762 .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
// Record our proposal so closing_signed() can detect acceptance of this exact fee.
5764 self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
5765 Ok((Some(msgs::ClosingSigned {
5766 channel_id: self.context.channel_id,
5767 fee_satoshis: total_fee_satoshis,
5769 fee_range: Some(msgs::ClosingSignedFeeRange {
5770 min_fee_satoshis: our_min_fee,
5771 max_fee_satoshis: our_max_fee,
5775 // TODO (taproot|arik)
5781 // Marks a channel as waiting for a response from the counterparty. If it's not received
5782 // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
5784 fn mark_awaiting_response(&mut self) {
// Some(0) means "waiting, zero timer ticks elapsed"; the counter is advanced by
// should_disconnect_peer_awaiting_response on each timer tick.
5785 self.context.sent_message_awaiting_response = Some(0);
5788 /// Determines whether we should disconnect the counterparty due to not receiving a response
5789 /// within our expected timeframe.
5791 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
5792 pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
5793 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
5796 // Don't disconnect when we're not waiting on a response.
// Advance the per-tick counter and signal a disconnect once the limit is reached.
5799 *ticks_elapsed += 1;
5800 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
5804 &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
5805 ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
// A shutdown is only valid on a connected, funded channel.
5807 if self.context.channel_state.is_peer_disconnected() {
5808 return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
5810 if self.context.channel_state.is_pre_funded_state() {
5811 // Spec says we should fail the connection, not the channel, but that's nonsense, there
5812 // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
5813 // can do that via error message without getting a connection fail anyway...
5814 return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
// Shutdown is not allowed while the peer still has inbound HTLCs it announced but never
// committed (RemoteAnnounced).
5816 for htlc in self.context.pending_inbound_htlcs.iter() {
5817 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
5818 return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
5821 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
// The counterparty's closing script must be one of the BOLT 2-permitted forms (subject to
// their advertised features); otherwise we only warn, not close.
5823 if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
5824 return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
// A peer may not change its shutdown script once it has sent one.
5827 if self.context.counterparty_shutdown_scriptpubkey.is_some() {
5828 if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
5829 return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
5832 self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
5835 // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
5836 // immediately after the commitment dance, but we can send a Shutdown because we won't send
5837 // any further commitment updates after we set LocalShutdownSent.
5838 let send_shutdown = !self.context.channel_state.is_local_shutdown_sent();
// If we haven't picked our own shutdown script yet, fetch one from the signer now so we can
// send our own `shutdown` below; a freshly-chosen script also needs persisting (see the
// monitor update further down).
5840 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
5843 assert!(send_shutdown);
5844 let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
5845 Ok(scriptpubkey) => scriptpubkey,
5846 Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
5848 if !shutdown_scriptpubkey.is_compatible(their_features) {
5849 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
5851 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
5856 // From here on out, we may not fail!
5858 self.context.channel_state.set_remote_shutdown_sent();
5859 self.context.update_time_counter += 1;
// Persist the newly-selected closing script to the ChannelMonitor so it survives restarts.
5861 let monitor_update = if update_shutdown_script {
5862 self.context.latest_monitor_update_id += 1;
5863 let monitor_update = ChannelMonitorUpdate {
5864 update_id: self.context.latest_monitor_update_id,
5865 counterparty_node_id: Some(self.context.counterparty_node_id),
5866 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
5867 scriptpubkey: self.get_closing_scriptpubkey(),
5869 channel_id: Some(self.context.channel_id()),
5871 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
5872 self.push_ret_blockable_mon_update(monitor_update)
5874 let shutdown = if send_shutdown {
5875 Some(msgs::Shutdown {
5876 channel_id: self.context.channel_id,
5877 scriptpubkey: self.get_closing_scriptpubkey(),
5881 // We can't send our shutdown until we've committed all of our pending HTLCs, but the
5882 // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
5883 // cell HTLCs and return them to fail the payment.
5884 self.context.holding_cell_update_fee = None;
5885 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
5886 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5888 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
// Uncommitted adds are dropped and handed back to the caller to fail upstream.
5889 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
5896 self.context.channel_state.set_local_shutdown_sent();
5897 self.context.update_time_counter += 1;
5899 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
/// Assembles the fully-signed cooperative close transaction by filling in the witness on
/// the 2-of-2 funding input with both parties' signatures and the funding redeemscript.
5902 fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
5903 let mut tx = closing_tx.trust().built_transaction().clone();
5905 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
5907 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
5908 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
// Both signatures are DER-encoded with SIGHASH_ALL appended, as required for the witness.
5909 let mut holder_sig = sig.serialize_der().to_vec();
5910 holder_sig.push(EcdsaSighashType::All as u8);
5911 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
5912 cp_sig.push(EcdsaSighashType::All as u8);
// Signature order must match the pubkey order in the redeemscript, which (per BOLT 3) is
// the lexicographic ordering of the serialized funding pubkeys.
5913 if funding_key[..] < counterparty_funding_key[..] {
5914 tx.input[0].witness.push(holder_sig);
5915 tx.input[0].witness.push(cp_sig);
5917 tx.input[0].witness.push(cp_sig);
5918 tx.input[0].witness.push(holder_sig);
// Final witness element is the funding redeemscript itself.
5921 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
/// Handles the counterparty's `closing_signed`: validates their proposed fee and signature,
/// and either completes the close (returning the fully-signed transaction and a
/// [`ShutdownResult`]) or returns a counter-proposal within our computed fee range.
5925 pub fn closing_signed<F: Deref>(
5926 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
5927 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
5928 where F::Target: FeeEstimator
// closing_signed is only valid once both sides have sent shutdown, while connected, and
// with no HTLCs left on the channel.
5930 if !self.context.channel_state.is_both_sides_shutdown() {
5931 return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
5933 if self.context.channel_state.is_peer_disconnected() {
5934 return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
5936 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
5937 return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
5939 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
5940 return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
// As the funder we must send the first closing_signed; the peer jumping ahead is a
// protocol violation.
5943 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
5944 return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
// Defer processing while a monitor update is pending; maybe_propose_closing_signed will
// pick the stashed message back up once the update completes.
5947 if self.context.channel_state.is_monitor_update_in_progress() {
5948 self.context.pending_counterparty_closing_signed = Some(msg.clone());
5949 return Ok((None, None, None));
5952 let funding_redeemscript = self.context.get_funding_redeemscript();
5953 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
5954 if used_total_fee != msg.fee_satoshis {
5955 return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
5957 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
// Verify their signature against the closing tx as we built it; on failure, retry under
// the assumption they dropped their own output (e.g. due to differing dust limits).
5959 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
5962 // The remote end may have decided to revoke their output due to inconsistent dust
5963 // limits, so check for that case by re-checking the signature here.
5964 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
5965 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
5966 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
// Refuse non-segwit outputs below the standard dust threshold — such a tx would be
// non-standard and likely never relayed.
5970 for outp in closing_tx.trust().built_transaction().output.iter() {
5971 if !outp.script_pubkey.is_witness_program() && outp.value < Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS) {
5972 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
5976 let closure_reason = if self.initiated_shutdown() {
5977 ClosureReason::LocallyInitiatedCooperativeClosure
5979 ClosureReason::CounterpartyInitiatedCooperativeClosure
5982 assert!(self.context.shutdown_scriptpubkey.is_some());
// If the peer accepted our last proposal exactly, negotiation is complete — build the
// final signed transaction and mark the channel shut down.
5983 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
5984 if last_fee == msg.fee_satoshis {
5985 let shutdown_result = ShutdownResult {
5987 monitor_update: None,
5988 dropped_outbound_htlcs: Vec::new(),
5989 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
5990 channel_id: self.context.channel_id,
5991 user_channel_id: self.context.user_id,
5992 channel_capacity_satoshis: self.context.channel_value_satoshis,
5993 counterparty_node_id: self.context.counterparty_node_id,
5994 unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
5995 channel_funding_txo: self.context.get_funding_txo(),
5997 let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
5998 self.context.channel_state = ChannelState::ShutdownComplete;
5999 self.context.update_time_counter += 1;
6000 return Ok((None, Some(tx), Some(shutdown_result)));
6004 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
// Signs and returns our (counter-)proposal at $new_fee; when $new_fee equals the peer's
// offer the negotiation completes and the signed tx is returned alongside the message.
6006 macro_rules! propose_fee {
6007 ($new_fee: expr) => {
6008 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
6009 (closing_tx, $new_fee)
6011 self.build_closing_transaction($new_fee, false)
6014 return match &self.context.holder_signer {
6015 ChannelSignerType::Ecdsa(ecdsa) => {
6017 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
6018 .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
6019 let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
6020 let shutdown_result = ShutdownResult {
6022 monitor_update: None,
6023 dropped_outbound_htlcs: Vec::new(),
6024 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
6025 channel_id: self.context.channel_id,
6026 user_channel_id: self.context.user_id,
6027 channel_capacity_satoshis: self.context.channel_value_satoshis,
6028 counterparty_node_id: self.context.counterparty_node_id,
6029 unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
6030 channel_funding_txo: self.context.get_funding_txo(),
6032 self.context.channel_state = ChannelState::ShutdownComplete;
6033 self.context.update_time_counter += 1;
6034 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
6035 (Some(tx), Some(shutdown_result))
6040 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
6041 Ok((Some(msgs::ClosingSigned {
6042 channel_id: self.context.channel_id,
6043 fee_satoshis: used_fee,
6045 fee_range: Some(msgs::ClosingSignedFeeRange {
6046 min_fee_satoshis: our_min_fee,
6047 max_fee_satoshis: our_max_fee,
6049 }), signed_tx, shutdown_result))
6051 // TODO (taproot|arik)
// Modern fee negotiation: the peer advertised an acceptable fee range; their offer must
// fall within it, and the ranges must overlap with ours.
6058 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
6059 if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
6060 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
6062 if max_fee_satoshis < our_min_fee {
6063 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
6065 if min_fee_satoshis > our_max_fee {
6066 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
6069 if !self.context.is_outbound() {
6070 // They have to pay, so pick the highest fee in the overlapping range.
6071 // We should never set an upper bound aside from their full balance
6072 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
6073 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
// We're the funder: having advertised our range in our prior message, the peer's offer
// must already fall within it; accept it as-is.
6075 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
6076 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
6077 msg.fee_satoshis, our_min_fee, our_max_fee)));
6079 // The proposed fee is in our acceptable range, accept it and broadcast!
6080 propose_fee!(msg.fee_satoshis);
6083 // Old fee style negotiation. We don't bother to enforce whether they are complying
6084 // with the "making progress" requirements, we just comply and hope for the best.
6085 if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
6086 if msg.fee_satoshis > last_fee {
6087 if msg.fee_satoshis < our_max_fee {
6088 propose_fee!(msg.fee_satoshis);
6089 } else if last_fee < our_max_fee {
6090 propose_fee!(our_max_fee);
6092 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
6095 if msg.fee_satoshis > our_min_fee {
6096 propose_fee!(msg.fee_satoshis);
6097 } else if last_fee > our_min_fee {
6098 propose_fee!(our_min_fee);
6100 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
// No prior proposal from us: clamp their offer into our range and respond.
6104 if msg.fee_satoshis < our_min_fee {
6105 propose_fee!(our_min_fee);
6106 } else if msg.fee_satoshis > our_max_fee {
6107 propose_fee!(our_max_fee);
6109 propose_fee!(msg.fee_satoshis);
/// Checks that a to-be-forwarded HTLC pays at least the fee required by `config` and leaves
/// at least `config.cltv_expiry_delta` blocks between the incoming and outgoing expiries.
/// On failure, returns the BOLT 4 failure message string and error code.
6116 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
6117 ) -> Result<(), (&'static str, u16)> {
// fee = base_msat + amt * proportional_millionths / 1_000_000, with overflow-checked math
// (a None fee means the multiplication/addition overflowed and the HTLC is rejected).
6118 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
6119 .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
6120 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
6121 (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
6123 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
6124 0x1000 | 12, // fee_insufficient
// The incoming expiry must exceed the outgoing expiry by at least our configured delta
// (compared in u64 to avoid overflow of the u32 sum).
6127 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
6129 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
6130 0x1000 | 13, // incorrect_cltv_expiry
6136 /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
6137 /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
6138 /// unsuccessful, falls back to the previous one if one exists.
6139 pub fn htlc_satisfies_config(
6140 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
6141 ) -> Result<(), (&'static str, u16)> {
6142 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
// Retrying against the previous config (when present) tolerates HTLCs routed before an
// in-flight config update propagated.
6144 if let Some(prev_config) = self.context.prev_config() {
6145 self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
/// Determines whether we can accept an inbound HTLC without violating our dust-exposure
/// limits or (for inbound channels) the counterparty's fee spike buffer. Returns the
/// BOLT 4 failure string and code if the HTLC must be failed back.
6152 pub fn can_accept_incoming_htlc<F: Deref, L: Deref>(
6153 &self, msg: &msgs::UpdateAddHTLC, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: L
6154 ) -> Result<(), (&'static str, u16)>
6156 F::Target: FeeEstimator,
// No new HTLCs once we've sent shutdown.
6159 if self.context.channel_state.is_local_shutdown_sent() {
6160 return Err(("Shutdown was already sent", 0x4000|8))
6163 let dust_exposure_limiting_feerate = self.context.get_dust_exposure_limiting_feerate(&fee_estimator);
6164 let htlc_stats = self.context.get_pending_htlc_stats(None, dust_exposure_limiting_feerate);
6165 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(dust_exposure_limiting_feerate);
// Per-direction dust thresholds: with anchors, HTLC transactions are zero-fee, so the
// weight-based component is dropped (the non-anchor arm below scales tx weight by the
// buffered feerate).
6166 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
6169 let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
6170 (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
6171 dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
// On the counterparty's commitment tx an inbound HTLC is claimed via a timeout tx;
// below this threshold it is dust there and adds to our counterparty-tx dust exposure.
6173 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
6174 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
6175 let on_counterparty_tx_dust_htlc_exposure_msat = htlc_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
6176 if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
6177 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
6178 on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
6179 return Err(("Exceeded our dust exposure limit on counterparty commitment tx", 0x1000|7))
// Even a non-dust HTLC increases the commitment tx fee (paid by the funder), which
// also counts toward dust-like exposure on the counterparty's transaction.
6182 let htlc_dust_exposure_msat =
6183 per_outbound_htlc_counterparty_commit_tx_fee_msat(self.context.feerate_per_kw, &self.context.channel_type);
6184 let counterparty_tx_dust_exposure =
6185 htlc_stats.on_counterparty_tx_dust_exposure_msat.saturating_add(htlc_dust_exposure_msat);
6186 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
6187 log_info!(logger, "Cannot accept value that would put our exposure to tx fee dust at {} over the limit {} on counterparty commitment tx",
6188 counterparty_tx_dust_exposure, max_dust_htlc_exposure_msat);
6189 return Err(("Exceeded our tx fee dust exposure limit on counterparty commitment tx", 0x1000|7))
// On our own commitment tx an inbound HTLC is claimed via a success tx; check the
// analogous dust-exposure limit on the holder side.
6193 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
6194 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
6195 let on_holder_tx_dust_htlc_exposure_msat = htlc_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
6196 if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
6197 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
6198 on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
6199 return Err(("Exceeded our dust exposure limit on holder commitment tx", 0x1000|7))
// Anchor channels lock up two fixed-value anchor outputs that can't pay fees.
6203 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
6204 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
// Outbound HTLCs already claimed-by-preimage but not yet irrevocably removed will move
// value to us; count them so we don't overestimate the counterparty's spendable balance.
6209 let mut removed_outbound_total_msat = 0;
6210 for ref htlc in self.context.pending_outbound_htlcs.iter() {
6211 if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
6212 removed_outbound_total_msat += htlc.amount_msat;
6213 } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
6214 removed_outbound_total_msat += htlc.amount_msat;
6218 let pending_value_to_self_msat =
6219 self.context.value_to_self_msat + htlc_stats.pending_inbound_htlcs_value_msat - removed_outbound_total_msat;
6220 let pending_remote_value_msat =
6221 self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
6223 if !self.context.is_outbound() {
6224 // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
6225 // the spec because the fee spike buffer requirement doesn't exist on the receiver's
6226 // side, only on the sender's. Note that with anchor outputs we are no longer as
6227 // sensitive to fee spikes, so we need to account for them.
6228 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
6229 let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
6230 if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
// Without anchors, require headroom for a feerate increase (the "fee spike buffer").
6231 remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
// The funder (counterparty, since we're inbound here) must still afford the commitment
// fee plus buffer after this HTLC, their reserve, and any anchor value.
6233 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
6234 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
6235 return Err(("Fee spike buffer violation", 0x1000|7));
/// Gets the current (latest-built) holder commitment transaction number.
/// Commitment numbers count down; the context field holds the number to use for the
/// *next* commitment, so the current one is that value plus one.
6242 pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
6243 self.context.cur_holder_commitment_transaction_number + 1
/// Gets the current counterparty commitment transaction number. As above, the context
/// field holds the next number, hence `+ 1`; if we are awaiting the counterparty's
/// revoke_and_ack, an additional in-flight commitment exists and one is subtracted.
6246 pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
6247 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 }
/// Gets the number of the latest counterparty commitment transaction which has been
/// revoked: two ahead of the next-to-be-built counterparty commitment (`+ 2`), i.e.
/// one before the current one.
6250 pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
6251 self.context.cur_counterparty_commitment_transaction_number + 2
/// Returns a reference to the holder's channel signer.
6255 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
6256 &self.context.holder_signer
/// Builds a [`ChannelValueStat`] snapshot of the channel's balances, HTLC totals and
/// limits (all values in msat unless otherwise noted).
6260 pub fn get_value_stat(&self) -> ChannelValueStat {
6262 value_to_self_msat: self.context.value_to_self_msat,
6263 channel_value_msat: self.context.channel_value_satoshis * 1000,
// NOTE(review): `unwrap()` assumes the counterparty-selected reserve is set, i.e. the
// channel has progressed past accept_channel — confirm callers only use this on funded
// channels.
6264 channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
6265 pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
6266 pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
// Sum the value of add-HTLC updates still queued in the holding cell (not yet
// committed to either commitment transaction).
6267 holding_cell_outbound_amount_msat: {
6269 for h in self.context.holding_cell_htlc_updates.iter() {
6271 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
6279 counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
6280 counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
6284 /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
6285 /// Allowed in any state (including after shutdown)
6286 pub fn is_awaiting_monitor_update(&self) -> bool {
// Delegates to the state machine's MONITOR_UPDATE_IN_PROGRESS flag.
6287 self.context.channel_state.is_monitor_update_in_progress()
6290 /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
6291 pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
6292 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
// Updates are released in ID order, so the latest unblocked update is the one
// immediately preceding the first blocked update in the queue.
6293 self.context.blocked_monitor_updates[0].update.update_id - 1
6296 /// Returns the next blocked monitor update, if one exists, and a bool which indicates a
6297 /// further blocked monitor update exists after the next.
6298 pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
6299 if self.context.blocked_monitor_updates.is_empty() { return None; }
// Pop from the front to preserve FIFO release order; the bool tells the caller
// whether more blocked updates remain queued behind this one.
6300 Some((self.context.blocked_monitor_updates.remove(0).update,
6301 !self.context.blocked_monitor_updates.is_empty()))
6304 /// Pushes a new monitor update into our monitor update queue, returning it if it should be
6305 /// immediately given to the user for persisting or `None` if it should be held as blocked.
6306 fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
6307 -> Option<ChannelMonitorUpdate> {
// Only release the update immediately if nothing is already blocked; otherwise it
// must queue behind the existing blocked updates to preserve monitor-update ordering.
6308 let release_monitor = self.context.blocked_monitor_updates.is_empty();
6309 if !release_monitor {
6310 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
6319 /// On startup, its possible we detect some monitor updates have actually completed (and the
6320 /// ChannelManager was simply stale). In that case, we should simply drop them, which we do
6321 /// here after logging them.
6322 pub fn on_startup_drop_completed_blocked_mon_updates_through<L: Logger>(&mut self, logger: &L, loaded_mon_update_id: u64) {
6323 let channel_id = self.context.channel_id();
// Retain only updates newer than the one the loaded monitor already persisted;
// anything at or below `loaded_mon_update_id` has completed and is logged + dropped.
6324 self.context.blocked_monitor_updates.retain(|update| {
6325 if update.update.update_id <= loaded_mon_update_id {
6328 "Dropping completed ChannelMonitorUpdate id {} on channel {} due to a stale ChannelManager",
6329 update.update.update_id,
/// Returns the number of [`ChannelMonitorUpdate`]s currently held back as blocked.
6339 pub fn blocked_monitor_updates_pending(&self) -> usize {
6340 self.context.blocked_monitor_updates.len()
6343 /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
6344 /// If the channel is outbound, this implies we have not yet broadcasted the funding
6345 /// transaction. If the channel is inbound, this implies simply that the channel has not
6347 pub fn is_awaiting_initial_mon_persist(&self) -> bool {
6348 if !self.is_awaiting_monitor_update() { return false; }
// Still in AwaitingChannelReady with (at most) flags that can be set pre-funding-lock:
// this is the normal non-0conf initial-persist case.
6350 self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
6351 if flags.clone().clear(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty()
6353 // If we're not a 0conf channel, we'll be waiting on a monitor update with only
6354 // AwaitingChannelReady set, though our peer could have sent their channel_ready.
6355 debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0)
6358 if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
6359 self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
6360 // If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
6361 // waiting for the initial monitor persistence. Thus, we check if our commitment
6362 // transaction numbers have both been iterated only exactly once (for the
6363 // funding_signed), and we're awaiting monitor update.
6365 // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
6366 // only way to get an awaiting-monitor-update state during initial funding is if the
6367 // initial monitor persistence is still pending).
6369 // Because deciding we're awaiting initial broadcast spuriously could result in
6370 // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
6371 // we hard-assert here, even in production builds.
6372 if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
6373 assert!(self.context.monitor_pending_channel_ready);
6374 assert_eq!(self.context.latest_monitor_update_id, 0);
6380 /// Returns true if our channel_ready has been sent
6381 pub fn is_our_channel_ready(&self) -> bool {
// Either we've flagged OUR_CHANNEL_READY while still in AwaitingChannelReady, or the
// channel has fully reached ChannelReady (which implies both sides sent channel_ready).
6382 matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) ||
6383 matches!(self.context.channel_state, ChannelState::ChannelReady(_))
6386 /// Returns true if our peer has either initiated or agreed to shut down the channel.
6387 pub fn received_shutdown(&self) -> bool {
// True once we've processed a `shutdown` message from the counterparty.
6388 self.context.channel_state.is_remote_shutdown_sent()
6391 /// Returns true if we either initiated or agreed to shut down the channel.
6392 pub fn sent_shutdown(&self) -> bool {
// True once we've sent our own `shutdown` message (whether or not we initiated).
6393 self.context.channel_state.is_local_shutdown_sent()
6396 /// Returns true if we initiated to shut down the channel.
6397 pub fn initiated_shutdown(&self) -> bool {
// Distinct from `sent_shutdown`: this is only set when *we* started the close.
6398 self.context.local_initiated_shutdown.is_some()
6401 /// Returns true if this channel is fully shut down. True here implies that no further actions
6402 /// may/will be taken on this channel, and thus this object should be freed. Any future changes
6403 /// will be handled appropriately by the chain monitor.
6404 pub fn is_shutdown(&self) -> bool {
6405 matches!(self.context.channel_state, ChannelState::ShutdownComplete)
/// Returns the channel's current [`ChannelUpdateStatus`].
6408 pub fn channel_update_status(&self) -> ChannelUpdateStatus {
6409 self.context.channel_update_status
/// Sets the channel's [`ChannelUpdateStatus`].
6412 pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
// Bump the update-time counter so any subsequently generated channel_update carries a
// strictly newer timestamp.
6413 self.context.update_time_counter += 1;
6414 self.context.channel_update_status = status;
/// Checks whether the funding transaction has reached the required depth at `height` and,
/// if so, advances the channel state and returns a `channel_ready` message to send (or
/// `None` if one cannot/should not be sent yet).
6417 fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
6419 // * always when a new block/transactions are confirmed with the new height
6420 // * when funding is signed with a height of 0
// Not confirmed at all yet (and not a 0-conf channel): nothing to do.
6421 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
6425 let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
// A non-positive confirmation count means the funding tx was reorged out; reset the
// recorded confirmation height.
6426 if funding_tx_confirmations <= 0 {
6427 self.context.funding_tx_confirmation_height = 0;
6430 if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
6434 // If we're still pending the signature on a funding transaction, then we're not ready to send a
6435 // channel_ready yet.
6436 if self.context.signer_pending_funding {
6440 // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
6441 // channel_ready until the entire batch is ready.
// State transitions: no channel_ready seen yet -> mark ours sent; theirs already seen
// -> move straight to ChannelReady; ours already sent -> re-send is allowed on reconnect.
6442 let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()).is_empty()) {
6443 self.context.channel_state.set_our_channel_ready();
6445 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
6446 self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
6447 self.context.update_time_counter += 1;
6449 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
6450 // We got a reorg but not enough to trigger a force close, just ignore.
6453 if self.context.funding_tx_confirmation_height != 0 &&
6454 self.context.channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new())
6456 // We should never see a funding transaction on-chain until we've received
6457 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
6458 // an inbound channel - before that we have no known funding TXID). The fuzzer,
6459 // however, may do this and we shouldn't treat it as a bug.
6460 #[cfg(not(fuzzing))]
6461 panic!("Started confirming a channel in a state pre-AwaitingChannelReady: {}.\n\
6462 Do NOT broadcast a funding transaction manually - let LDK do it for you!",
6463 self.context.channel_state.to_u32());
6465 // We got a reorg but not enough to trigger a force close, just ignore.
6469 if need_commitment_update {
// Only hand out channel_ready if no monitor update is pending and the peer is
// connected; otherwise remember to send it once the monitor update completes.
6470 if !self.context.channel_state.is_monitor_update_in_progress() {
6471 if !self.context.channel_state.is_peer_disconnected() {
6472 let next_per_commitment_point =
6473 self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
6474 return Some(msgs::ChannelReady {
6475 channel_id: self.context.channel_id,
6476 next_per_commitment_point,
6477 short_channel_id_alias: Some(self.context.outbound_scid_alias),
6481 self.context.monitor_pending_channel_ready = true;
6487 /// When a transaction is confirmed, we check whether it is or spends the funding transaction
6488 /// In the first case, we store the confirmation height and calculating the short channel id.
6489 /// In the second, we simply return an Err indicating we need to be force-closed now.
6490 pub fn transactions_confirmed<NS: Deref, L: Deref>(
6491 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
6492 chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
6493 ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
6495 NS::Target: NodeSigner,
6498 let mut msgs = (None, None);
6499 if let Some(funding_txo) = self.context.get_funding_txo() {
6500 for &(index_in_block, tx) in txdata.iter() {
6501 // Check if the transaction is the expected funding transaction, and if it is,
6502 // check that it pays the right amount to the right script.
6503 if self.context.funding_tx_confirmation_height == 0 {
6504 if tx.txid() == funding_txo.txid {
6505 let txo_idx = funding_txo.index as usize;
// Validate the claimed funding output actually exists and matches the negotiated
// P2WSH script and channel value.
6506 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_p2wsh() ||
6507 tx.output[txo_idx].value.to_sat() != self.context.channel_value_satoshis {
6508 if self.context.is_outbound() {
6509 // If we generated the funding transaction and it doesn't match what it
6510 // should, the client is really broken and we should just panic and
6511 // tell them off. That said, because hash collisions happen with high
6512 // probability in fuzzing mode, if we're fuzzing we just close the
6513 // channel and move on.
6514 #[cfg(not(fuzzing))]
6515 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
6517 self.context.update_time_counter += 1;
6518 let err_reason = "funding tx had wrong script/value or output index";
6519 return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
6521 if self.context.is_outbound() {
// An empty witness on any input means the tx is malleable (its txid could change),
// which would invalidate our signed commitment transactions.
6522 if !tx.is_coinbase() {
6523 for input in tx.input.iter() {
6524 if input.witness.is_empty() {
6525 // We generated a malleable funding transaction, implying we've
6526 // just exposed ourselves to funds loss to our counterparty.
6527 #[cfg(not(fuzzing))]
6528 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
6533 self.context.funding_tx_confirmation_height = height;
6534 self.context.funding_tx_confirmed_in = Some(*block_hash);
6535 self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
6536 Ok(scid) => Some(scid),
6537 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
6540 // If this is a coinbase transaction and not a 0-conf channel
6541 // we should update our min_depth to 100 to handle coinbase maturity
6542 if tx.is_coinbase() &&
6543 self.context.minimum_depth.unwrap_or(0) > 0 &&
6544 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
6545 self.context.minimum_depth = Some(COINBASE_MATURITY);
6548 // If we allow 1-conf funding, we may need to check for channel_ready here and
6549 // send it immediately instead of waiting for a best_block_updated call (which
6550 // may have already happened for this block).
6551 if let Some(channel_ready) = self.check_get_channel_ready(height) {
6552 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
6553 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
6554 msgs = (Some(channel_ready), announcement_sigs);
// Any confirmed transaction spending the funding outpoint closes the channel
// (commitment or cooperative close hitting the chain).
6557 for inp in tx.input.iter() {
6558 if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
6559 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
6560 return Err(ClosureReason::CommitmentTxConfirmed);
6568 /// When a new block is connected, we check the height of the block against outbound holding
6569 /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
6570 /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
6571 /// handled by the ChannelMonitor.
6573 /// If we return Err, the channel may have been closed, at which point the standard
6574 /// requirements apply - no calls may be made except those explicitly stated to be allowed
6577 /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
6579 pub fn best_block_updated<NS: Deref, L: Deref>(
6580 &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
6581 node_signer: &NS, user_config: &UserConfig, logger: &L
6582 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
6584 NS::Target: NodeSigner,
// Thin wrapper: passes the signer/config through so announcement_signatures can be
// generated; the reorg path calls `do_best_block_updated` with `None` instead.
6587 self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
/// Shared implementation of block-height updates. `chain_node_signer` is `None` when
/// called for a simulated reorg (from `funding_transaction_unconfirmed`), in which case
/// no announcement_signatures can be produced.
6590 fn do_best_block_updated<NS: Deref, L: Deref>(
6591 &mut self, height: u32, highest_header_time: u32,
6592 chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
6593 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
6595 NS::Target: NodeSigner,
6598 let mut timed_out_htlcs = Vec::new();
6599 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
6600 // forward an HTLC when our counterparty should almost certainly just fail it for expiring
6602 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
// Drop holding-cell add-HTLCs that are about to expire; they are returned to the
// caller to be failed backwards.
6603 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
6605 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
6606 if *cltv_expiry <= unforwarded_htlc_cltv_limit {
6607 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
// Track the newest header time seen so generated channel_updates have fresh timestamps.
6615 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
6617 if let Some(channel_ready) = self.check_get_channel_ready(height) {
6618 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
6619 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
6621 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
6622 return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
6625 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
6626 self.context.channel_state.is_our_channel_ready() {
6627 let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
6628 if self.context.funding_tx_confirmation_height == 0 {
6629 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
6630 // zero if it has been reorged out, however in either case, our state flags
6631 // indicate we've already sent a channel_ready
6632 funding_tx_confirmations = 0;
6635 // If we've sent channel_ready (or have both sent and received channel_ready), and
6636 // the funding transaction has become unconfirmed,
6637 // close the channel and hope we can get the latest state on chain (because presumably
6638 // the funding transaction is at least still in the mempool of most nodes).
6640 // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
6641 // 0-conf channel, but not doing so may lead to the
6642 // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
6644 if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
6645 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
6646 self.context.minimum_depth.unwrap(), funding_tx_confirmations);
6647 return Err(ClosureReason::ProcessingError { err: err_reason });
// Inbound channel whose funding never confirmed within the deadline: give up so we
// don't track a dead channel forever (we never put funds at risk here).
6649 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
6650 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
6651 log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
6652 // If funding_tx_confirmed_in is unset, the channel must not be active
6653 assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new()));
6654 assert!(!self.context.channel_state.is_our_channel_ready());
6655 return Err(ClosureReason::FundingTimedOut);
6658 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
6659 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
6661 Ok((None, timed_out_htlcs, announcement_sigs))
6664 /// Indicates the funding transaction is no longer confirmed in the main chain. This may
6665 /// force-close the channel, but may also indicate a harmless reorganization of a block or two
6666 /// before the channel has reached channel_ready and we can just wait for more blocks.
6667 pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
6668 if self.context.funding_tx_confirmation_height != 0 {
6669 // We handle the funding disconnection by calling best_block_updated with a height one
6670 // below where our funding was connected, implying a reorg back to conf_height - 1.
6671 let reorg_height = self.context.funding_tx_confirmation_height - 1;
6672 // We use the time field to bump the current time we set on channel updates if its
6673 // larger. If we don't know that time has moved forward, we can just set it to the last
6674 // time we saw and it will be ignored.
6675 let best_time = self.context.update_time_counter;
// No signer/config is passed, so no channel_ready/announcement_sigs can be produced —
// the asserts below enforce that invariant.
6676 match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) {
6677 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
6678 assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
6679 assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
6680 assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
6686 // We never learned about the funding confirmation anyway, just ignore
6691 // Methods to get unprompted messages to send to the remote end (or where we already returned
6692 // something in the handler for the message that prompted this message):
6694 /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
6695 /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
6696 /// directions). Should be used for both broadcasted announcements and in response to an
6697 /// AnnouncementSignatures message from the remote peer.
6699 /// Will only fail if we're not in a state where channel_announcement may be sent (including
6702 /// This will only return ChannelError::Ignore upon failure.
6704 /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
6705 fn get_channel_announcement<NS: Deref>(
6706 &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
6707 ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
6708 if !self.context.config.announced_channel {
6709 return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
6711 if !self.context.is_usable() {
6712 return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
6715 let short_channel_id = self.context.get_short_channel_id()
6716 .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
6717 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
6718 .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
6719 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
// BOLT 7 requires node_id_1/node_id_2 in lexicographic order; `were_node_one` records
// whether our key sorts first so keys and signatures are slotted consistently.
6720 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
6722 let msg = msgs::UnsignedChannelAnnouncement {
6723 features: channelmanager::provided_channel_features(&user_config),
6726 node_id_1: if were_node_one { node_id } else { counterparty_node_id },
6727 node_id_2: if were_node_one { counterparty_node_id } else { node_id },
6728 bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
6729 bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
6730 excess_data: Vec::new(),
/// Builds an `announcement_signatures` message for this channel if it is ready to be
/// announced (deep enough, usable, peer connected, not already sent), returning `None`
/// otherwise. Marks the signatures as sent on success.
6736 fn get_announcement_sigs<NS: Deref, L: Deref>(
6737 &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
6738 best_block_height: u32, logger: &L
6739 ) -> Option<msgs::AnnouncementSignatures>
6741 NS::Target: NodeSigner,
// `conf_height + 5 > best_block_height` means fewer than 6 confirmations — the
// BOLT 7 minimum depth before a channel may be announced.
6744 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
6748 if !self.context.is_usable() {
6752 if self.context.channel_state.is_peer_disconnected() {
6753 log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
// Only send announcement_signatures once per channel lifetime.
6757 if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
6761 log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
6762 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
6765 log_trace!(logger, "{:?}", e);
6769 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
6771 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
// Second signature is made with the on-chain funding key, proving control of the
// channel's funding output.
6776 match &self.context.holder_signer {
6777 ChannelSignerType::Ecdsa(ecdsa) => {
6778 let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
6780 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
6785 let short_channel_id = match self.context.get_short_channel_id() {
6787 None => return None,
6790 self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
6792 Some(msgs::AnnouncementSignatures {
6793 channel_id: self.context.channel_id(),
6795 node_signature: our_node_sig,
6796 bitcoin_signature: our_bitcoin_sig,
6799 // TODO (taproot|arik)
6805 /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
6807 fn sign_channel_announcement<NS: Deref>(
6808 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
6809 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
// We can only produce a full announcement once the counterparty's signatures have
// been received and stored (via `announcement_signatures`).
6810 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
6811 let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
6812 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
// Determine which BOLT 7 signature slot is ours by checking whether our key occupies
// the `node_id_1` position of the announcement.
6813 let were_node_one = announcement.node_id_1 == our_node_key;
6815 let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
6816 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
6817 match &self.context.holder_signer {
6818 ChannelSignerType::Ecdsa(ecdsa) => {
6819 let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
6820 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
6821 Ok(msgs::ChannelAnnouncement {
6822 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
6823 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
6824 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
6825 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
6826 contents: announcement,
6829 // TODO (taproot|arik)
6834 Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
6838 /// Processes an incoming announcement_signatures message, providing a fully-signed
6839 /// channel_announcement message which we can broadcast and storing our counterparty's
6840 /// signatures for later reconstruction/rebroadcast of the channel_announcement.
6841 pub fn announcement_signatures<NS: Deref>(
6842 &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
6843 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
6844 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
6845 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
// Both signatures cover the double-SHA256 of the serialized unsigned announcement.
6847 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
// Verify the counterparty's node signature (their node key)...
6849 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
6850 return Err(ChannelError::Close(format!(
6851 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
6852 &announcement, self.context.get_counterparty_node_id())));
// ...and their bitcoin signature (their on-chain funding key).
6854 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
6855 return Err(ChannelError::Close(format!(
6856 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
6857 &announcement, self.context.counterparty_funding_pubkey())));
// Store the (now-verified) signatures *before* the depth check below, so we can still
// build the announcement later once the funding tx is deep enough.
6860 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
6861 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
6862 return Err(ChannelError::Ignore(
6863 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
6866 self.sign_channel_announcement(node_signer, announcement)
6869 /// Gets a signed channel_announcement for this channel, if we previously received an
6870 /// announcement_signatures from our counterparty.
6871 pub fn get_signed_channel_announcement<NS: Deref>(
6872 &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
6873 ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
// Don't announce until the funding transaction has at least six confirmations
// (confirmation_height + 5 > best_block_height means fewer than six confs).
6874 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
6877 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
6879 Err(_) => return None,
// sign_channel_announcement fails (Ignore) if we never got announcement_signatures.
6881 match self.sign_channel_announcement(node_signer, announcement) {
6882 Ok(res) => Some(res),
6887 /// May panic if called on a channel that wasn't immediately-previously
6888 /// self.remove_uncommitted_htlcs_and_mark_paused()'d
// Builds the channel_reestablish message we send on reconnection, including the
// data_loss_protect fields (last per-commitment secret + dummy current point).
6889 pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
// Callers must have marked the peer disconnected first; see doc comment above.
6890 assert!(self.context.channel_state.is_peer_disconnected());
6891 assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
6892 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
6893 // current to_remote balances. However, it no longer has any use, and thus is now simply
6894 // set to a dummy (but valid, as required by the spec) public key.
6895 // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
6896 // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
6897 // valid, and valid in fuzzing mode's arbitrary validity criteria:
6898 let mut pk = [2; 33]; pk[1] = 0xff;
6899 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
// If the counterparty has revoked at least one commitment, include the most recent
// revoked secret so they can prove/restore state (data_loss_protect).
6900 let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
6901 let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
6902 log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
6905 log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
// Start the "awaiting response" timer so we can disconnect an unresponsive peer.
6908 self.mark_awaiting_response();
6909 msgs::ChannelReestablish {
6910 channel_id: self.context.channel_id(),
6911 // The protocol has two different commitment number concepts - the "commitment
6912 // transaction number", which starts from 0 and counts up, and the "revocation key
6913 // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
6914 // commitment transaction numbers by the index which will be used to reveal the
6915 // revocation key for that commitment transaction, which means we have to convert them
6916 // to protocol-level commitment numbers here...
6918 // next_local_commitment_number is the next commitment_signed number we expect to
6919 // receive (indicating if they need to resend one that we missed).
6920 next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
6921 // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
6922 // receive, however we track it by the next commitment number for a remote transaction
6923 // (which is one further, as they always revoke previous commitment transaction, not
6924 // the one we send) so we have to decrement by 1. Note that if
6925 // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
6926 // dropped this channel on disconnect as it hasn't yet reached AwaitingChannelReady so we can't
6928 next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
6929 your_last_per_commitment_secret: remote_last_secret,
6930 my_current_per_commitment_point: dummy_pubkey,
6931 // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
6932 // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
6933 // txid of that interactive transaction, else we MUST NOT set it.
6934 next_funding_txid: None,
6939 // Send stuff to our remote peers:
6941 /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
6942 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
6943 /// commitment update.
6945 /// `Err`s will only be [`ChannelError::Ignore`].
6946 pub fn queue_add_htlc<F: Deref, L: Deref>(
6947 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
6948 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
6949 blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
6950 ) -> Result<(), ChannelError>
6951 where F::Target: FeeEstimator, L::Target: Logger
// Delegate to send_htlc with force_holding_cell = true (the literal `true` below), so
// no update_add_htlc message can be produced here - hence the assert on msg_opt.
6954 .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
6955 skimmed_fee_msat, blinding_point, fee_estimator, logger)
6956 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
// Queueing may be refused (Ignore) but must never escalate to channel failure.
6958 if let ChannelError::Ignore(_) = err { /* fine */ }
6959 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
6964 /// Adds a pending outbound HTLC to this channel, note that you probably want
6965 /// [`Self::send_htlc_and_commit`] instead cause you'll want both messages at once.
6967 /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
6969 /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
6970 /// wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
6972 /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
6973 /// we may not yet have sent the previous commitment update messages and will need to
6974 /// regenerate them.
6976 /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
6977 /// on this [`Channel`] if `force_holding_cell` is false.
6979 /// `Err`s will only be [`ChannelError::Ignore`].
6980 fn send_htlc<F: Deref, L: Deref>(
6981 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
6982 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
6983 skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
6984 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
6985 ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
6986 where F::Target: FeeEstimator, L::Target: Logger
// State gate: HTLCs can only be added to a fully-established channel on which
// neither side has begun shutdown.
6988 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
6989 self.context.channel_state.is_local_shutdown_sent() ||
6990 self.context.channel_state.is_remote_shutdown_sent()
6992 return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
// Sanity bounds: the HTLC must be non-zero and cannot exceed the channel's value.
6994 let channel_total_msat = self.context.channel_value_satoshis * 1000;
6995 if amount_msat > channel_total_msat {
6996 return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
6999 if amount_msat == 0 {
7000 return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
// Enforce the fee-/reserve-aware limits on what the next outbound HTLC may carry.
7003 let available_balances = self.context.get_available_balances(fee_estimator);
7004 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
7005 return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
7006 available_balances.next_outbound_htlc_minimum_msat)));
7009 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
7010 return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
7011 available_balances.next_outbound_htlc_limit_msat)));
7014 if self.context.channel_state.is_peer_disconnected() {
7015 // Note that this should never really happen, if we're !is_live() on receipt of an
7016 // incoming HTLC for relay will result in us rejecting the HTLC and we won't allow
7017 // the user to send directly into a !is_live() channel. However, if we
7018 // disconnected during the time the previous hop was doing the commitment dance we may
7019 // end up getting here after the forwarding delay. In any case, returning an
7020 // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
7021 return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
// If we can't currently generate a new commitment (awaiting RAA or a monitor
// update), the HTLC must go into the holding cell rather than out on the wire.
7024 let need_holding_cell = !self.context.channel_state.can_generate_new_commitment();
7025 log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
7026 payment_hash, amount_msat,
7027 if force_holding_cell { "into holding cell" }
7028 else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
7029 else { "to peer" });
7031 if need_holding_cell {
7032 force_holding_cell = true;
7035 // Now update local state:
// Holding-cell path: record the add for later and return no message.
7036 if force_holding_cell {
7037 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
7042 onion_routing_packet,
// Live path: track the HTLC as LocalAnnounced and build the update_add_htlc message.
7049 self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
7050 htlc_id: self.context.next_holder_htlc_id,
7052 payment_hash: payment_hash.clone(),
7054 state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
7060 let res = msgs::UpdateAddHTLC {
7061 channel_id: self.context.channel_id,
7062 htlc_id: self.context.next_holder_htlc_id,
7066 onion_routing_packet,
// Consume the HTLC id regardless of which path was taken above.
7070 self.context.next_holder_htlc_id += 1;
// Advances HTLC/fee state for a newly-sent commitment_signed and returns the
// ChannelMonitorUpdate recording the new counterparty commitment transaction.
7075 fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
7076 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
7077 // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
7078 // fail to generate this, we still are at least at a position where upgrading their status
// Promote inbound HTLCs now covered by the commitment we're about to sign.
7080 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
7081 let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
7082 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
7084 if let Some(state) = new_state {
7085 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
// Same promotion for outbound HTLC removals awaiting this commitment.
7089 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
7090 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
7091 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
7092 // Grab the preimage, if it exists, instead of cloning
7093 let mut reason = OutboundHTLCOutcome::Success(None);
7094 mem::swap(outcome, &mut reason);
7095 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
// An inbound update_fee (we're not the funder) becomes committed with this commitment.
7098 if let Some((feerate, update_state)) = self.context.pending_update_fee {
7099 if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
7100 debug_assert!(!self.context.is_outbound());
7101 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
7102 self.context.feerate_per_kw = feerate;
7103 self.context.pending_update_fee = None;
// On reconnect we must resend revoke_and_ack before this commitment_signed.
7106 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
7108 let (mut htlcs_ref, counterparty_commitment_tx) =
7109 self.build_commitment_no_state_update(logger);
7110 let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
// Box the HTLC sources so they can be owned by the monitor update.
7111 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
7112 htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
7114 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
7115 self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
// Record the new counterparty commitment tx in the channel monitor so it can claim
// outputs if that transaction ever confirms.
7118 self.context.latest_monitor_update_id += 1;
7119 let monitor_update = ChannelMonitorUpdate {
7120 update_id: self.context.latest_monitor_update_id,
7121 counterparty_node_id: Some(self.context.counterparty_node_id),
7122 updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
7123 commitment_txid: counterparty_commitment_txid,
7124 htlc_outputs: htlcs.clone(),
7125 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
7126 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
7127 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
7128 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
7129 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
7131 channel_id: Some(self.context.channel_id()),
7133 self.context.channel_state.set_awaiting_remote_revoke();
// Builds (without mutating channel state) the next counterparty commitment
// transaction and the HTLCs it includes.
7137 fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
7138 -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
7139 where L::Target: Logger
7141 let counterparty_keys = self.context.build_remote_transaction_keys();
7142 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
7143 let counterparty_commitment_tx = commitment_stats.tx;
// Test/fuzz-only cross-check: if a projected fee was cached for this remote
// commitment, assert the actually-computed fee matches it.
7145 #[cfg(any(test, fuzzing))]
7147 if !self.context.is_outbound() {
7148 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
7149 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
7150 if let Some(info) = projected_commit_tx_info {
// Only compare when the channel hasn't changed since the projection was cached.
7151 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
7152 if info.total_pending_htlcs == total_pending_htlcs
7153 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
7154 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
7155 && info.feerate == self.context.feerate_per_kw {
7156 let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
7157 assert_eq!(actual_fee, info.fee);
7163 (commitment_stats.htlcs_included, counterparty_commitment_tx)
7166 /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
7167 /// generation when we shouldn't change HTLC/channel state.
7168 fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
7169 // Get the fee tests from `build_commitment_no_state_update`
7170 #[cfg(any(test, fuzzing))]
7171 self.build_commitment_no_state_update(logger);
// Rebuild the counterparty commitment transaction to sign - no state is mutated.
7173 let counterparty_keys = self.context.build_remote_transaction_keys();
7174 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
7175 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
7177 match &self.context.holder_signer {
7178 ChannelSignerType::Ecdsa(ecdsa) => {
7179 let (signature, htlc_signatures);
7182 let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
7183 for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
// Sign the commitment tx and all of its HTLC transactions in one signer call.
// Signer failure here is treated as transient (Ignore), not channel-fatal.
7187 let res = ecdsa.sign_counterparty_commitment(
7188 &commitment_stats.tx,
7189 commitment_stats.inbound_htlc_preimages,
7190 commitment_stats.outbound_htlc_preimages,
7191 &self.context.secp_ctx,
7192 ).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
7194 htlc_signatures = res.1;
7196 log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
7197 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
7198 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
7199 log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
// Trace each HTLC signature alongside the transaction it signs for debuggability.
7201 for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
7202 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
7203 encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
7204 encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
7205 log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
7206 log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
7210 Ok((msgs::CommitmentSigned {
7211 channel_id: self.context.channel_id,
7215 partial_signature_with_nonce: None,
7216 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
// Taproot signing path not yet implemented.
7218 // TODO (taproot|arik)
7224 /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
7225 /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
7227 /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
7228 /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
7229 pub fn send_htlc_and_commit<F: Deref, L: Deref>(
7230 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
7231 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
7232 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
7233 ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
7234 where F::Target: FeeEstimator, L::Target: Logger
// force_holding_cell = false: try to send immediately; blinding_point is None here.
7236 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
7237 onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
// send_htlc may only fail with Ignore; anything else indicates a bug.
7238 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
// Build the new commitment and pause monitor-updating until it's persisted.
7241 let monitor_update = self.build_commitment_no_status_check(logger);
7242 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
7243 Ok(self.push_ret_blockable_mon_update(monitor_update))
7249 /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
/// took place.
7251 pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
// Cache the counterparty's forwarding parameters from their channel_update so we can
// construct route hints / forward HTLCs with their current fees and CLTV delta.
7252 let new_forwarding_info = Some(CounterpartyForwardingInfo {
7253 fee_base_msat: msg.contents.fee_base_msat,
7254 fee_proportional_millionths: msg.contents.fee_proportional_millionths,
7255 cltv_expiry_delta: msg.contents.cltv_expiry_delta
// Compare before overwriting so callers learn whether anything changed.
7257 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
7259 self.context.counterparty_forwarding_info = new_forwarding_info;
7265 /// Begins the shutdown process, getting a message for the remote peer and returning all
7266 /// holding cell HTLCs for payment failure.
7267 pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
7268 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
7269 -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
// Refuse to start shutdown while we have HTLCs the peer hasn't yet acknowledged.
7271 for htlc in self.context.pending_outbound_htlcs.iter() {
7272 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
7273 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
7276 if self.context.channel_state.is_local_shutdown_sent() {
7277 return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
7279 else if self.context.channel_state.is_remote_shutdown_sent() {
7280 return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
// A shutdown script committed earlier (e.g. via upfront_shutdown_script) can't be overridden.
7282 if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
7283 return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
7285 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
7286 if self.context.channel_state.is_peer_disconnected() || self.context.channel_state.is_monitor_update_in_progress() {
7287 return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
// If no shutdown script was committed yet, pick one now (caller override first,
// otherwise ask the signer) and remember that the monitor must learn about it.
7290 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
7293 // use override shutdown script if provided
7294 let shutdown_scriptpubkey = match override_shutdown_script {
7295 Some(script) => script,
7297 // otherwise, use the shutdown scriptpubkey provided by the signer
7298 match signer_provider.get_shutdown_scriptpubkey() {
7299 Ok(scriptpubkey) => scriptpubkey,
7300 Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
// The chosen script must be a form the counterparty's feature set accepts.
7304 if !shutdown_scriptpubkey.is_compatible(their_features) {
7305 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
7307 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
7312 // From here on out, we may not fail!
7313 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
7314 self.context.channel_state.set_local_shutdown_sent();
7315 self.context.local_initiated_shutdown = Some(());
7316 self.context.update_time_counter += 1;
// Persist the (possibly new) shutdown script in the channel monitor.
7318 let monitor_update = if update_shutdown_script {
7319 self.context.latest_monitor_update_id += 1;
7320 let monitor_update = ChannelMonitorUpdate {
7321 update_id: self.context.latest_monitor_update_id,
7322 counterparty_node_id: Some(self.context.counterparty_node_id),
7323 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
7324 scriptpubkey: self.get_closing_scriptpubkey(),
7326 channel_id: Some(self.context.channel_id()),
7328 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
7329 self.push_ret_blockable_mon_update(monitor_update)
7331 let shutdown = msgs::Shutdown {
7332 channel_id: self.context.channel_id,
7333 scriptpubkey: self.get_closing_scriptpubkey(),
7336 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
7337 // our shutdown until we've committed all of the pending changes.
7338 self.context.holding_cell_update_fee = None;
7339 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
7340 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
7342 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
7343 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
7350 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
7351 "we can't both complete shutdown and return a monitor update");
7353 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
/// Returns the source and payment hash of every outbound HTLC we are currently
/// responsible for, including those still queued in the holding cell.
7356 pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
7357 self.context.holding_cell_htlc_updates.iter()
7358 .flat_map(|htlc_update| {
// Only AddHTLC entries represent in-flight outbound HTLCs; other holding-cell
// update kinds are filtered out by the flat_map.
7360 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
7361 => Some((source, payment_hash)),
7365 .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
7369 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
7370 pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
// Channel state/configuration shared across all channel phases.
7371 pub context: ChannelContext<SP>,
// State tracked only while the channel remains unfunded (e.g. its age in timer ticks).
7372 pub unfunded_context: UnfundedChannelContext,
7375 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
/// Creates a new outbound (V1 establishment) channel, deriving fresh channel keys
/// from the signer provider. Returns an APIError if the implied reserve is too low.
7376 pub fn new<ES: Deref, F: Deref>(
7377 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
7378 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
7379 outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
7380 ) -> Result<OutboundV1Channel<SP>, APIError>
7381 where ES::Target: EntropySource,
7382 F::Target: FeeEstimator
// The reserve we require of the counterparty must be at least the dust limit or
// their commitment outputs could be unspendable.
7384 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
7385 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
7386 // Protocol level safety check in place, although it should never happen because
7387 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
7388 return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below \
7389 implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
// Derive this channel's key set (false = not an inbound channel).
7392 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
7393 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
7394 let pubkeys = holder_signer.pubkeys().clone();
// Build the shared context for an outbound channel. NOTE(review): several constructor
// arguments are elided in this listing; see ChannelContext::new_for_outbound_channel.
7397 context: ChannelContext::new_for_outbound_channel(
7401 counterparty_node_id,
7403 channel_value_satoshis,
7407 current_chain_height,
7408 outbound_scid_alias,
7409 temporary_channel_id,
7410 holder_selected_channel_reserve_satoshis,
// Freshly-created channels start with an unfunded age of zero ticks.
7415 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
7420 /// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
// Returns None if the signer is not yet able to produce the counterparty commitment
// signature (async signing), in which case signer_pending_funding is left set.
7421 fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
7422 let counterparty_keys = self.context.build_remote_transaction_keys();
7423 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
7424 let signature = match &self.context.holder_signer {
7425 // TODO (taproot|arik): move match into calling method for Taproot
7426 ChannelSignerType::Ecdsa(ecdsa) => {
// No HTLCs can exist at funding time, hence the empty preimage vectors.
7427 ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
7428 .map(|(sig, _)| sig).ok()?
7430 // TODO (taproot|arik)
// If we had been waiting on the signer, the signature is now available.
7435 if self.context.signer_pending_funding {
7436 log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
7437 self.context.signer_pending_funding = false;
7440 Some(msgs::FundingCreated {
7441 temporary_channel_id: self.context.temporary_channel_id.unwrap(),
7442 funding_txid: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
7443 funding_output_index: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
7446 partial_signature_with_nonce: None,
7448 next_local_nonce: None,
7452 /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
7453 /// a funding_created message for the remote peer.
7454 /// Panics if called at some time other than immediately after initial handshake, if called twice,
7455 /// or if called on an inbound channel.
7456 /// Note that channel_id changes during this call!
7457 /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
7458 /// If an Err is returned, it is a ChannelError::Close.
7459 pub fn get_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
7460 -> Result<Option<msgs::FundingCreated>, (Self, ChannelError)> where L::Target: Logger {
7461 if !self.context.is_outbound() {
7462 panic!("Tried to create outbound funding_created message on an inbound channel!");
// Both init messages must have been exchanged, and nothing more (see panic below).
7465 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7466 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7468 panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
// No commitment numbers may have advanced before funding_created.
7470 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7471 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7472 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7473 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
// Commit the funding outpoint to the transaction parameters and hand them to the signer.
7476 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
7477 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
7479 // Now that we're past error-generating stuff, update our local state:
7481 self.context.channel_state = ChannelState::FundingNegotiated;
// The real channel_id is derived from the funding outpoint, replacing the temporary one.
7482 self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
7484 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
7485 // We can skip this if it is a zero-conf channel.
7486 if funding_transaction.is_coinbase() &&
7487 self.context.minimum_depth.unwrap_or(0) > 0 &&
7488 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
7489 self.context.minimum_depth = Some(COINBASE_MATURITY);
7492 self.context.funding_transaction = Some(funding_transaction);
7493 self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
7495 let funding_created = self.get_funding_created_msg(logger);
7496 if funding_created.is_none() {
// Without async signing support, a missing signature at this point is fatal.
7497 #[cfg(not(async_signing))] {
7498 panic!("Failed to get signature for new funding creation");
// With async signing, record that the message must be re-generated once the
// signer produces the signature.
7500 #[cfg(async_signing)] {
7501 if !self.context.signer_pending_funding {
7502 log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
7503 self.context.signer_pending_funding = true;
7511 /// If we receive an error message, it may only be a rejection of the channel type we tried,
7512 /// not of our ability to open any channel at all. Thus, on error, we should first call this
7513 /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
// NOTE(review): extraction dropped interior lines here (the `where` opener and closing
// brace are not visible); code below is kept byte-identical to the original.
7514 pub(crate) fn maybe_handle_error_without_close<F: Deref>(
7515 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
7516 ) -> Result<msgs::OpenChannel, ()>
7518 F::Target: FeeEstimator
// Attempt to downgrade our negotiated channel features; if that succeeds (`?` propagates
// the Err(()) otherwise), regenerate a fresh `open_channel` message to retry the handshake.
7520 self.context.maybe_downgrade_channel_features(fee_estimator)?;
7521 Ok(self.get_open_channel(chain_hash))
7524 /// Returns true if we can resume the channel by sending the [`msgs::OpenChannel`] again.
// Resumable only while the peer has not responded to our handshake yet AND our holder
// commitment number is still at its initial value (i.e. the channel never advanced).
7525 pub fn is_resumable(&self) -> bool {
7526 !self.context.have_received_message() &&
7527 self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER
// Builds the `open_channel` message that initiates V1 channel establishment.
// Panics (internal invariant violations, not peer-triggerable) if called on an inbound
// channel, after the peer has already responded, or after the commitment number advanced.
7530 pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
7531 if !self.context.is_outbound() {
7532 panic!("Tried to open a channel for an inbound channel?");
7534 if self.context.have_received_message() {
7535 panic!("Cannot generate an open_channel after we've moved forward");
7538 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7539 panic!("Tried to send an open_channel for a channel that has already advanced");
// Fetch the first per-commitment point from our signer and our channel basepoints; these
// are advertised to the counterparty in the message below.
7542 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
7543 let keys = self.context.get_holder_pubkeys();
7546 common_fields: msgs::CommonOpenChannelFields {
7548 temporary_channel_id: self.context.channel_id,
7549 funding_satoshis: self.context.channel_value_satoshis,
7550 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
7551 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
7552 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
7553 commitment_feerate_sat_per_1000_weight: self.context.feerate_per_kw as u32,
7554 to_self_delay: self.context.get_holder_selected_contest_delay(),
7555 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
7556 funding_pubkey: keys.funding_pubkey,
7557 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
7558 payment_basepoint: keys.payment_point,
7559 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
7560 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
7561 first_per_commitment_point,
// Bit 0 of channel_flags signals `announce_channel` per BOLT 2.
7562 channel_flags: if self.context.config.announced_channel {1} else {0},
// Upfront shutdown script: an empty script is the BOLT 2 way of opting out.
7563 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
7564 Some(script) => script.clone().into_inner(),
7565 None => Builder::new().into_script(),
7567 channel_type: Some(self.context.channel_type.clone()),
// push_msat = everything we are NOT keeping for ourselves out of the channel value.
7569 push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
7570 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
// Handles the counterparty's `accept_channel` response to our `open_channel`.
// Validates every field against BOLT 2 requirements and the user-configured handshake
// limits, then records the counterparty's parameters and advances the negotiation state.
// Errors close the (not-yet-funded) channel.
7575 pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
// A per-channel override of the handshake limits (if set) wins over the global defaults.
7576 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
7578 // Check sanity of message fields:
7579 if !self.context.is_outbound() {
7580 return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
// accept_channel is only valid exactly once, right after we sent open_channel.
7582 if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
7583 return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
// 21000000 * 100000000 is the total bitcoin supply in satoshis — an absurd dust limit.
7585 if msg.common_fields.dust_limit_satoshis > 21000000 * 100000000 {
7586 return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.common_fields.dust_limit_satoshis)));
7588 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
7589 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
7591 if msg.common_fields.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
7592 return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.common_fields.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
7594 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
7595 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
7596 msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
// An htlc_minimum at-or-above the spendable channel value makes every HTLC unsendable.
7598 let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
7599 if msg.common_fields.htlc_minimum_msat >= full_channel_value_msat {
7600 return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.common_fields.htlc_minimum_msat, full_channel_value_msat)));
7602 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
7603 if msg.common_fields.to_self_delay > max_delay_acceptable {
7604 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.common_fields.to_self_delay)));
7606 if msg.common_fields.max_accepted_htlcs < 1 {
7607 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
7609 if msg.common_fields.max_accepted_htlcs > MAX_HTLCS {
7610 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.common_fields.max_accepted_htlcs, MAX_HTLCS)));
7613 // Now check against optional parameters as set by config...
7614 if msg.common_fields.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
7615 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.common_fields.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
7617 if msg.common_fields.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
7618 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.common_fields.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
7620 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
7621 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
7623 if msg.common_fields.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
7624 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.common_fields.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
7626 if msg.common_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
7627 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
7629 if msg.common_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
7630 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
7632 if msg.common_fields.minimum_depth > peer_limits.max_minimum_depth {
7633 return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.common_fields.minimum_depth)));
// If the peer echoed a channel type it must match the one we proposed exactly; if they
// omitted it but support the feature, we fall back to static_remote_key only.
7636 if let Some(ty) = &msg.common_fields.channel_type {
7637 if *ty != self.context.channel_type {
7638 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
7640 } else if their_features.supports_channel_type() {
7641 // Assume they've accepted the channel type as they said they understand it.
7643 let channel_type = ChannelTypeFeatures::from_init(&their_features);
7644 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
7645 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
7647 self.context.channel_type = channel_type.clone();
7648 self.context.channel_transaction_parameters.channel_type_features = channel_type;
7651 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
7652 match &msg.common_fields.shutdown_scriptpubkey {
7653 &Some(ref script) => {
7654 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
7655 if script.len() == 0 {
7658 if !script::is_bolt2_compliant(&script, their_features) {
7659 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
7661 Some(script.clone())
7664 // Peer is signaling upfront shutdown but didn't opt-out with the correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
7666 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
// All checks passed — record the counterparty's negotiated parameters.
7671 self.context.counterparty_dust_limit_satoshis = msg.common_fields.dust_limit_satoshis;
7672 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.common_fields.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
7673 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
7674 self.context.counterparty_htlc_minimum_msat = msg.common_fields.htlc_minimum_msat;
7675 self.context.counterparty_max_accepted_htlcs = msg.common_fields.max_accepted_htlcs;
// If we trust our own 0-conf funding, take the peer's minimum_depth verbatim (possibly 0);
// otherwise require at least one confirmation.
7677 if peer_limits.trust_own_funding_0conf {
7678 self.context.minimum_depth = Some(msg.common_fields.minimum_depth);
7680 self.context.minimum_depth = Some(cmp::max(1, msg.common_fields.minimum_depth));
7683 let counterparty_pubkeys = ChannelPublicKeys {
7684 funding_pubkey: msg.common_fields.funding_pubkey,
7685 revocation_basepoint: RevocationBasepoint::from(msg.common_fields.revocation_basepoint),
7686 payment_point: msg.common_fields.payment_basepoint,
7687 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.common_fields.delayed_payment_basepoint),
7688 htlc_basepoint: HtlcBasepoint::from(msg.common_fields.htlc_basepoint)
7691 self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
7692 selected_contest_delay: msg.common_fields.to_self_delay,
7693 pubkeys: counterparty_pubkeys,
7696 self.context.counterparty_cur_commitment_point = Some(msg.common_fields.first_per_commitment_point);
7697 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
// Both init messages have now been exchanged; funding negotiation may proceed.
7699 self.context.channel_state = ChannelState::NegotiatingFunding(
7700 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
7702 self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
7707 /// Handles a funding_signed message from the remote end.
7708 /// If this call is successful, broadcast the funding transaction (and not before!)
// Consumes this OutboundV1Channel: on success it is promoted to a funded `Channel`
// (plus its freshly-created `ChannelMonitor`); on failure the unfunded channel is
// handed back alongside the error so the caller can decide what to do with it.
7709 pub fn funding_signed<L: Deref>(
7710 mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
7711 ) -> Result<(Channel<SP>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (OutboundV1Channel<SP>, ChannelError)>
7715 if !self.context.is_outbound() {
7716 return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
7718 if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
7719 return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
// 1 << 48 is the "no secrets seen yet" sentinel for the 48-bit commitment index space.
7721 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7722 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7723 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7724 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7727 let funding_script = self.context.get_funding_redeemscript();
// Build the counterparty's initial commitment tx (needed below to seed the monitor).
7729 let counterparty_keys = self.context.build_remote_transaction_keys();
7730 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
7731 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
7732 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
7734 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
7735 &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
// Build OUR initial commitment tx and verify the peer's signature over it.
7737 let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
7738 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
7740 let trusted_tx = initial_commitment_tx.trust();
7741 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
7742 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
7743 // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
7744 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
7745 return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));
7749 let holder_commitment_tx = HolderCommitmentTransaction::new(
7750 initial_commitment_tx,
7753 &self.context.get_holder_pubkeys().funding_pubkey,
7754 self.context.counterparty_funding_pubkey()
// Give our signer a chance to reject the commitment (NOTE(review): the `let validated =`
// binding is on a line dropped by extraction; code kept byte-identical).
7758 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
7759 if validated.is_err() {
7760 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
// Construct the ChannelMonitor which will watch the chain for this channel from now on.
7763 let funding_redeemscript = self.context.get_funding_redeemscript();
7764 let funding_txo = self.context.get_funding_txo().unwrap();
7765 let funding_txo_script = funding_redeemscript.to_p2wsh();
7766 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
7767 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
7768 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
7769 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
7770 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
7771 shutdown_script, self.context.get_holder_selected_contest_delay(),
7772 &self.context.destination_script, (funding_txo, funding_txo_script),
7773 &self.context.channel_transaction_parameters,
7774 funding_redeemscript.clone(), self.context.channel_value_satoshis,
7776 holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id());
7777 channel_monitor.provide_initial_counterparty_commitment_tx(
7778 counterparty_initial_bitcoin_tx.txid, Vec::new(),
7779 self.context.cur_counterparty_commitment_transaction_number,
7780 self.context.counterparty_cur_commitment_point.unwrap(),
7781 counterparty_initial_commitment_tx.feerate_per_kw(),
7782 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7783 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
7785 assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have not had any monitor(s) yet to fail update!
// Batch-funded channels must wait for the rest of the batch before becoming ready.
7786 if self.context.is_batch_funding() {
7787 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
7789 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
// Commitment numbers count DOWN from INITIAL_COMMITMENT_NUMBER.
7791 self.context.cur_holder_commitment_transaction_number -= 1;
7792 self.context.cur_counterparty_commitment_transaction_number -= 1;
7794 log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
// Promote to a full-fledged, funded Channel.
7796 let mut channel = Channel {
7797 context: self.context,
7798 #[cfg(any(dual_funding, splicing))]
7799 dual_funding_channel_context: None,
7802 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
7803 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7804 Ok((channel, channel_monitor))
7807 /// Indicates that the signer may have some signatures for us, so we should retry if we're
// Async-signing retry hook: if we were blocked waiting on the signer for the initial
// funding signature (outbound channels only), re-attempt building `funding_created` now.
7809 #[cfg(async_signing)]
7810 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
7811 if self.context.signer_pending_funding && self.context.is_outbound() {
7812 log_trace!(logger, "Signer unblocked a funding_created");
7813 self.get_funding_created_msg(logger)
7818 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
7819 pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
// Shared per-channel state (keys, negotiated parameters, channel_state machine, ...).
7820 pub context: ChannelContext<SP>,
// Extra bookkeeping that only exists while the channel is unfunded (e.g. age ticks).
7821 pub unfunded_context: UnfundedChannelContext,
7824 /// Fetches the [`ChannelTypeFeatures`] that will be used for a channel built from a given
7825 /// [`msgs::CommonOpenChannelFields`].
// Returns an error (closing the proposed channel) if the explicitly-requested type uses
// optional bits, lacks `static_remote_key`, contains features we don't support, or
// combines `scid_privacy` with a public channel. If no type was requested, falls back to
// deriving one from the peer's init features, which must be static_remote_key only.
7826 pub(super) fn channel_type_from_open_channel(
7827 common_fields: &msgs::CommonOpenChannelFields, their_features: &InitFeatures,
7828 our_supported_features: &ChannelTypeFeatures
7829 ) -> Result<ChannelTypeFeatures, ChannelError> {
7830 if let Some(channel_type) = &common_fields.channel_type {
7831 if channel_type.supports_any_optional_bits() {
7832 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
7835 // We only support the channel types defined by the `ChannelManager` in
7836 // `provided_channel_type_features`. The channel type must always support
7837 // `static_remote_key`.
7838 if !channel_type.requires_static_remote_key() {
7839 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
7841 // Make sure we support all of the features behind the channel type.
7842 if !channel_type.is_subset(our_supported_features) {
7843 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
// Bit 0 of channel_flags is the announce-channel flag per BOLT 2.
7845 let announced_channel = if (common_fields.channel_flags & 1) == 1 { true } else { false };
7846 if channel_type.requires_scid_privacy() && announced_channel {
7847 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
7849 Ok(channel_type.clone())
7851 let channel_type = ChannelTypeFeatures::from_init(&their_features);
7852 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
7853 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
7859 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
7860 /// Creates a new channel from a remote side's request for one.
7861 /// Assumes chain_hash has already been checked and corresponds with what we expect!
// Validates the requested channel type, derives our reserve for this channel, collects
// the counterparty's basepoints from the message, and builds the inbound channel context.
// NOTE(review): several argument lines of `new_for_inbound_channel` were dropped by
// extraction; code below is kept byte-identical.
7862 pub fn new<ES: Deref, F: Deref, L: Deref>(
7863 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
7864 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
7865 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
7866 current_chain_height: u32, logger: &L, is_0conf: bool,
7867 ) -> Result<InboundV1Channel<SP>, ChannelError>
7868 where ES::Target: EntropySource,
7869 F::Target: FeeEstimator,
7872 let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.common_fields.temporary_channel_id), None);
7874 // First check the channel type is known, failing before we do anything else if we don't
7875 // support this channel type.
7876 let channel_type = channel_type_from_open_channel(&msg.common_fields, their_features, our_supported_features)?;
7878 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.common_fields.funding_satoshis, config);
7879 let counterparty_pubkeys = ChannelPublicKeys {
7880 funding_pubkey: msg.common_fields.funding_pubkey,
7881 revocation_basepoint: RevocationBasepoint::from(msg.common_fields.revocation_basepoint),
7882 payment_point: msg.common_fields.payment_basepoint,
7883 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.common_fields.delayed_payment_basepoint),
7884 htlc_basepoint: HtlcBasepoint::from(msg.common_fields.htlc_basepoint)
7888 context: ChannelContext::new_for_inbound_channel(
7892 counterparty_node_id,
7896 current_chain_height,
7901 counterparty_pubkeys,
7903 holder_selected_channel_reserve_satoshis,
7904 msg.channel_reserve_satoshis,
7906 msg.common_fields.clone(),
7908 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
7913 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
7914 /// should be sent back to the counterparty node.
7916 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
// Panics on internal invariant violations: wrong direction, handshake already complete,
// or a commitment number that has already advanced past its initial value.
7917 pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
7918 if self.context.is_outbound() {
7919 panic!("Tried to send accept_channel for an outbound channel?");
7922 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7923 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7925 panic!("Tried to send accept_channel after channel had moved forward");
7927 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7928 panic!("Tried to send an accept_channel for a channel that has already advanced");
// All invariants hold — delegate actual message construction.
7931 self.generate_accept_channel_message()
7934 /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
7935 /// inbound channel. If the intention is to accept an inbound channel, use
7936 /// [`InboundV1Channel::accept_inbound_channel`] instead.
7938 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
// Pure message construction from our current context — performs no state checks itself.
7939 fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
7940 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
7941 let keys = self.context.get_holder_pubkeys();
7943 msgs::AcceptChannel {
7944 common_fields: msgs::CommonAcceptChannelFields {
7945 temporary_channel_id: self.context.channel_id,
7946 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
7947 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
7948 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
// `minimum_depth` is expected to have been set during inbound-channel construction;
// unwrap here asserts that invariant.
7949 minimum_depth: self.context.minimum_depth.unwrap(),
7950 to_self_delay: self.context.get_holder_selected_contest_delay(),
7951 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
7952 funding_pubkey: keys.funding_pubkey,
7953 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
7954 payment_basepoint: keys.payment_point,
7955 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
7956 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
7957 first_per_commitment_point,
// Empty script signals upfront-shutdown opt-out per BOLT 2.
7958 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
7959 Some(script) => script.clone().into_inner(),
7960 None => Builder::new().into_script(),
7962 channel_type: Some(self.context.channel_type.clone()),
7964 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
// Taproot-channel nonce; not used for this (ECDSA) channel — always None here.
7966 next_local_nonce: None,
7970 /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
7971 /// inbound channel without accepting it.
7973 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
// Thin test-only wrapper: builds the message without mutating channel state.
7975 pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
7976 self.generate_accept_channel_message()
// Rebuilds our initial holder commitment transaction and verifies the counterparty's
// `funding_created` signature over it. Returns the commitment tx on success so the
// caller can wrap it into a HolderCommitmentTransaction; errors via `secp_check!` on an
// invalid signature.
7979 fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
7980 let funding_script = self.context.get_funding_redeemscript();
7982 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
7983 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
7984 let trusted_tx = initial_commitment_tx.trust();
7985 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
// SIGHASH_ALL digest over the 2-of-2 funding redeemscript and full channel value.
7986 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
7987 // They sign the holder commitment transaction...
7988 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
7989 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
7990 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
7991 encode::serialize_hex(&funding_script), &self.context.channel_id());
7992 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
7994 Ok(initial_commitment_tx)
// Handles the counterparty's `funding_created` message, consuming this InboundV1Channel.
// On success: promotes to a funded `Channel`, returning our (possibly deferred, hence
// Option) `funding_signed` reply and the new `ChannelMonitor`. On failure the unfunded
// channel is returned with the error.
7997 pub fn funding_created<L: Deref>(
7998 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
7999 ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
8003 if self.context.is_outbound() {
8004 return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
8007 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
8008 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
8010 // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
8011 // remember the channel, so it's safe to just send an error_message here and drop the
8013 return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
// 1 << 48 is the "no secrets seen yet" sentinel for the 48-bit commitment index space.
8015 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
8016 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
8017 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
8018 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
8021 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
8022 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
8023 // This is an externally observable change before we finish all our checks. In particular
8024 // check_funding_created_signature may fail.
8025 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
// Verify the peer's signature; on failure, roll back the funding outpoint we just set.
8027 let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
8029 Err(ChannelError::Close(e)) => {
8030 self.context.channel_transaction_parameters.funding_outpoint = None;
8031 return Err((self, ChannelError::Close(e)));
8034 // The only error we know how to handle is ChannelError::Close, so we fall over here
8035 // to make sure we don't continue with an inconsistent state.
8036 panic!("unexpected error type from check_funding_created_signature {:?}", e);
8040 let holder_commitment_tx = HolderCommitmentTransaction::new(
8041 initial_commitment_tx,
8044 &self.context.get_holder_pubkeys().funding_pubkey,
8045 self.context.counterparty_funding_pubkey()
// Give our signer a chance to reject the commitment before we commit to the channel.
8048 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
8049 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
8052 // Now that we're past error-generating stuff, update our local state:
8054 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
8055 self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
// Commitment numbers count DOWN from INITIAL_COMMITMENT_NUMBER.
8056 self.context.cur_counterparty_commitment_transaction_number -= 1;
8057 self.context.cur_holder_commitment_transaction_number -= 1;
// May be None if the (async) signer has not produced a signature yet.
8059 let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
// Construct the ChannelMonitor which will watch the chain for this channel from now on.
8061 let funding_redeemscript = self.context.get_funding_redeemscript();
8062 let funding_txo_script = funding_redeemscript.to_p2wsh();
8063 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
8064 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
8065 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
8066 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
8067 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
8068 shutdown_script, self.context.get_holder_selected_contest_delay(),
8069 &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
8070 &self.context.channel_transaction_parameters,
8071 funding_redeemscript.clone(), self.context.channel_value_satoshis,
8073 holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id());
8074 channel_monitor.provide_initial_counterparty_commitment_tx(
// `+ 1` compensates for the decrement a few lines above: we seed the monitor with the
// counterparty's INITIAL commitment number.
8075 counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
8076 self.context.cur_counterparty_commitment_transaction_number + 1,
8077 self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
8078 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
8079 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
8081 log_info!(logger, "{} funding_signed for peer for channel {}",
8082 if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
8084 // Promote the channel to a full-fledged one now that we have updated the state and have a
8085 // `ChannelMonitor`.
8086 let mut channel = Channel {
8087 context: self.context,
8088 #[cfg(any(dual_funding, splicing))]
8089 dual_funding_channel_context: None,
8091 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
8092 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
8094 Ok((channel, funding_signed, channel_monitor))
8098 // A not-yet-funded outbound (from holder) channel using V2 channel establishment.
8099 #[cfg(any(dual_funding, splicing))]
8100 pub(super) struct OutboundV2Channel<SP: Deref> where SP::Target: SignerProvider {
// Shared per-channel state (keys, negotiated parameters, channel_state machine, ...).
8101 pub context: ChannelContext<SP>,
// Extra bookkeeping that only exists while the channel is unfunded (e.g. age ticks).
8102 pub unfunded_context: UnfundedChannelContext,
8103 #[cfg(any(dual_funding, splicing))]
// V2-establishment specifics: both sides' funding contributions, locktime, and feerate.
8104 pub dual_funding_context: DualFundingChannelContext,
// NOTE(review): extraction artifact — lines carry embedded original line
// numbers and some lines (closing braces, `Ok(Self { … })` tails, `where`
// blocks) are missing where the embedded numbering skips.
8107 #[cfg(any(dual_funding, splicing))]
8108 impl<SP: Deref> OutboundV2Channel<SP> where SP::Target: SignerProvider {
// Builds a new outbound V2 (dual-funded) channel: derives channel keys and a
// temporary channel id from our revocation basepoint, computes our reserve from
// our own funding amount, and records the funding feerate/locktime for the
// interactive funding negotiation.
8109 pub fn new<ES: Deref, F: Deref>(
8110 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
8111 counterparty_node_id: PublicKey, their_features: &InitFeatures, funding_satoshis: u64,
8112 user_id: u128, config: &UserConfig, current_chain_height: u32, outbound_scid_alias: u64,
8113 funding_confirmation_target: ConfirmationTarget,
8114 ) -> Result<OutboundV2Channel<SP>, APIError>
8115 where ES::Target: EntropySource,
8116 F::Target: FeeEstimator,
8118 let channel_keys_id = signer_provider.generate_channel_keys_id(false, funding_satoshis, user_id);
8119 let holder_signer = signer_provider.derive_channel_signer(funding_satoshis, channel_keys_id);
8120 let pubkeys = holder_signer.pubkeys().clone();
// V2 temporary channel ids are derived deterministically from our revocation
// basepoint rather than drawn at random.
8122 let temporary_channel_id = Some(ChannelId::temporary_v2_from_revocation_basepoint(&pubkeys.revocation_basepoint));
8124 let holder_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis(
8125 funding_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
8127 let funding_feerate_sat_per_1000_weight = fee_estimator.bounded_sat_per_1000_weight(funding_confirmation_target);
// The funding tx locktime is set to the current chain height — presumably to
// discourage fee-sniping; TODO confirm against the unmangled source/spec.
8128 let funding_tx_locktime = current_chain_height;
8131 context: ChannelContext::new_for_outbound_channel(
8135 counterparty_node_id,
8141 current_chain_height,
8142 outbound_scid_alias,
8143 temporary_channel_id,
8144 holder_selected_channel_reserve_satoshis,
8149 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 },
8150 dual_funding_context: DualFundingChannelContext {
8151 our_funding_satoshis: funding_satoshis,
// The counterparty's contribution is unknown until they respond; start at 0.
8152 their_funding_satoshis: 0,
8153 funding_tx_locktime,
8154 funding_feerate_sat_per_1000_weight,
8160 /// If we receive an error message, it may only be a rejection of the channel type we tried,
8161 /// not of our ability to open any channel at all. Thus, on error, we should first call this
8162 /// and see if we get a new `OpenChannelV2` message, otherwise the channel is failed.
8163 pub(crate) fn maybe_handle_error_without_close<F: Deref>(
8164 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
8165 ) -> Result<msgs::OpenChannelV2, ()>
8167 F::Target: FeeEstimator
// Try downgrading our requested channel features (e.g. drop anchors) and, if
// that succeeds, re-emit an open_channel2 to retry with the lesser type.
8169 self.context.maybe_downgrade_channel_features(fee_estimator)?;
8170 Ok(self.get_open_channel_v2(chain_hash))
// Builds the wire `open_channel2` message from our current context. Only valid
// before any counterparty message has been received and before the first
// commitment number has advanced (both checked via debug_assert below).
8173 pub fn get_open_channel_v2(&self, chain_hash: ChainHash) -> msgs::OpenChannelV2 {
8174 if self.context.have_received_message() {
8175 debug_assert!(false, "Cannot generate an open_channel2 after we've moved forward");
8178 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
8179 debug_assert!(false, "Tried to send an open_channel2 for a channel that has already advanced");
// V2 open includes both the first and second per-commitment points up front.
8182 let first_per_commitment_point = self.context.holder_signer.as_ref()
8183 .get_per_commitment_point(self.context.cur_holder_commitment_transaction_number,
8184 &self.context.secp_ctx);
8185 let second_per_commitment_point = self.context.holder_signer.as_ref()
8186 .get_per_commitment_point(self.context.cur_holder_commitment_transaction_number - 1,
8187 &self.context.secp_ctx);
8188 let keys = self.context.get_holder_pubkeys();
8190 msgs::OpenChannelV2 {
8191 common_fields: msgs::CommonOpenChannelFields {
8193 temporary_channel_id: self.context.temporary_channel_id.unwrap(),
8194 funding_satoshis: self.context.channel_value_satoshis,
8195 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
8196 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
8197 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
8198 commitment_feerate_sat_per_1000_weight: self.context.feerate_per_kw,
8199 to_self_delay: self.context.get_holder_selected_contest_delay(),
8200 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
8201 funding_pubkey: keys.funding_pubkey,
8202 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
8203 payment_basepoint: keys.payment_point,
8204 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
8205 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
8206 first_per_commitment_point,
// Bit 0 of channel_flags is announce_channel.
8207 channel_flags: if self.context.config.announced_channel {1} else {0},
// Always send an upfront shutdown script field; an empty script means "none".
8208 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
8209 Some(script) => script.clone().into_inner(),
8210 None => Builder::new().into_script(),
8212 channel_type: Some(self.context.channel_type.clone()),
// NOTE(review): the funding feerate is set from `feerate_per_kw` (the
// commitment feerate) here rather than the dual_funding_context's
// funding_feerate_sat_per_1000_weight — verify intent against upstream.
8214 funding_feerate_sat_per_1000_weight: self.context.feerate_per_kw,
8215 second_per_commitment_point,
8216 locktime: self.dual_funding_context.funding_tx_locktime,
8217 require_confirmed_inputs: None,
// NOTE(review): listing carries embedded original line numbers and the struct's
// closing brace was dropped by the extractor.
8222 // A not-yet-funded inbound (from counterparty) channel using V2 channel establishment.
8223 #[cfg(any(dual_funding, splicing))]
8224 pub(super) struct InboundV2Channel<SP: Deref> where SP::Target: SignerProvider {
// Shared channel state used by both funded and unfunded channels.
8225 pub context: ChannelContext<SP>,
// State that only exists while the channel is unfunded.
8226 pub unfunded_context: UnfundedChannelContext,
// Dual-funding negotiation state populated from the peer's open_channel2.
8227 pub dual_funding_context: DualFundingChannelContext,
// NOTE(review): extraction artifact — embedded line numbers skip, so closing
// braces, match scrutinees and `Ok(Self { … })` tails are missing from view.
8230 #[cfg(any(dual_funding, splicing))]
8231 impl<SP: Deref> InboundV2Channel<SP> where SP::Target: SignerProvider {
8232 /// Creates a new dual-funded channel from a remote side's request for one.
8233 /// Assumes chain_hash has already been checked and corresponds with what we expect!
8234 pub fn new<ES: Deref, F: Deref, L: Deref>(
8235 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
8236 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
8237 their_features: &InitFeatures, msg: &msgs::OpenChannelV2, funding_satoshis: u64, user_id: u128,
8238 config: &UserConfig, current_chain_height: u32, logger: &L,
8239 ) -> Result<InboundV2Channel<SP>, ChannelError>
8240 where ES::Target: EntropySource,
8241 F::Target: FeeEstimator,
// Total channel value is both sides' contributions; saturating_add guards
// against u64 overflow on adversarial input.
8244 let channel_value_satoshis = funding_satoshis.saturating_add(msg.common_fields.funding_satoshis);
8245 let counterparty_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis(
8246 channel_value_satoshis, msg.common_fields.dust_limit_satoshis);
8247 let holder_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis(
8248 channel_value_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
8250 // First check the channel type is known, failing before we do anything else if we don't
8251 // support this channel type.
8252 if msg.common_fields.channel_type.is_none() {
8253 return Err(ChannelError::Close(format!("Rejecting V2 channel {} missing channel_type",
8254 msg.common_fields.temporary_channel_id)))
8256 let channel_type = channel_type_from_open_channel(&msg.common_fields, their_features, our_supported_features)?;
// Wrap the peer's raw basepoints in their typed newtype wrappers.
8258 let counterparty_pubkeys = ChannelPublicKeys {
8259 funding_pubkey: msg.common_fields.funding_pubkey,
8260 revocation_basepoint: RevocationBasepoint(msg.common_fields.revocation_basepoint),
8261 payment_point: msg.common_fields.payment_basepoint,
8262 delayed_payment_basepoint: DelayedPaymentBasepoint(msg.common_fields.delayed_payment_basepoint),
8263 htlc_basepoint: HtlcBasepoint(msg.common_fields.htlc_basepoint)
8266 let mut context = ChannelContext::new_for_inbound_channel(
8270 counterparty_node_id,
8274 current_chain_height,
8280 counterparty_pubkeys,
8282 holder_selected_channel_reserve_satoshis,
8283 counterparty_selected_channel_reserve_satoshis,
8284 0 /* push_msat not used in dual-funding */,
8285 msg.common_fields.clone(),
// The final (non-temporary) V2 channel id is derived from both sides'
// revocation basepoints, so it is known as soon as the open is accepted.
8287 let channel_id = ChannelId::v2_from_revocation_basepoints(
8288 &context.get_holder_pubkeys().revocation_basepoint,
8289 &context.get_counterparty_pubkeys().revocation_basepoint);
8290 context.channel_id = channel_id;
8294 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 },
8295 dual_funding_context: DualFundingChannelContext {
8296 our_funding_satoshis: funding_satoshis,
8297 their_funding_satoshis: msg.common_fields.funding_satoshis,
// Locktime and funding feerate are dictated by the opener's message.
8298 funding_tx_locktime: msg.locktime,
8299 funding_feerate_sat_per_1000_weight: msg.funding_feerate_sat_per_1000_weight,
8306 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannelV2`] message which
8307 /// should be sent back to the counterparty node.
8309 /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2
8310 pub fn accept_inbound_dual_funded_channel(&mut self) -> msgs::AcceptChannelV2 {
8311 if self.context.is_outbound() {
8312 debug_assert!(false, "Tried to send accept_channel for an outbound channel?");
// NOTE(review): the surrounding matches!/debug_assert scaffolding for this
// channel-state check was partially dropped by the extractor.
8315 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
8316 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
8318 debug_assert!(false, "Tried to send accept_channel2 after channel had moved forward");
8320 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
8321 debug_assert!(false, "Tried to send an accept_channel2 for a channel that has already advanced");
8324 self.generate_accept_channel_v2_message()
8327 /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
8328 /// inbound channel. If the intention is to accept an inbound channel, use
8329 /// [`InboundV1Channel::accept_inbound_channel`] instead.
8331 /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2
8332 fn generate_accept_channel_v2_message(&self) -> msgs::AcceptChannelV2 {
// Like open_channel2, accept_channel2 carries the first two per-commitment points.
8333 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(
8334 self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
8335 let second_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(
8336 self.context.cur_holder_commitment_transaction_number - 1, &self.context.secp_ctx);
8337 let keys = self.context.get_holder_pubkeys();
8339 msgs::AcceptChannelV2 {
8340 common_fields: msgs::CommonAcceptChannelFields {
8341 temporary_channel_id: self.context.temporary_channel_id.unwrap(),
8342 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
8343 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
8344 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
8345 minimum_depth: self.context.minimum_depth.unwrap(),
8346 to_self_delay: self.context.get_holder_selected_contest_delay(),
8347 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
8348 funding_pubkey: keys.funding_pubkey,
8349 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
8350 payment_basepoint: keys.payment_point,
8351 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
8352 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
8353 first_per_commitment_point,
// Empty script signals "no upfront shutdown script".
8354 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
8355 Some(script) => script.clone().into_inner(),
8356 None => Builder::new().into_script(),
8358 channel_type: Some(self.context.channel_type.clone()),
8360 funding_satoshis: self.dual_funding_context.our_funding_satoshis,
8361 second_per_commitment_point,
8362 require_confirmed_inputs: None,
8366 /// Enables the possibility for tests to extract a [`msgs::AcceptChannelV2`] message for an
8367 /// inbound channel without accepting it.
8369 /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2
8371 pub fn get_accept_channel_v2_message(&self) -> msgs::AcceptChannelV2 {
8372 self.generate_accept_channel_v2_message()
8376 // Unfunded channel utilities
// Chooses the channel type we first propose to a peer, based on our config and
// the peer's advertised features. NOTE(review): the trailing `ret` return and
// closing brace were dropped by the extractor (embedded numbering skips 8396+).
8378 fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
8379 // The default channel type (ie the first one we try) depends on whether the channel is
8380 // public - if it is, we just go with `only_static_remotekey` as it's the only option
8381 // available. If it's private, we first try `scid_privacy` as it provides better privacy
8382 // with no other changes, and fall back to `only_static_remotekey`.
8383 let mut ret = ChannelTypeFeatures::only_static_remote_key();
8384 if !config.channel_handshake_config.announced_channel &&
8385 config.channel_handshake_config.negotiate_scid_privacy &&
8386 their_features.supports_scid_privacy() {
8387 ret.set_scid_privacy_required();
8390 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
8391 // set it now. If they don't understand it, we'll fall back to our default of
8392 // `only_static_remotekey`.
8393 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
8394 their_features.supports_anchors_zero_fee_htlc_tx() {
8395 ret.set_anchors_zero_fee_htlc_tx_required();
// Current and minimum-supported on-disk `Channel` serialization versions; the
// writer below picks 4 only when a pending-resolution inbound HTLC forces it.
8401 const SERIALIZATION_VERSION: u8 = 4;
8402 const MIN_SERIALIZATION_VERSION: u8 = 3;
// NOTE(review): the macro's variant arguments were dropped by the extractor —
// this invocation is truncated in this listing.
8404 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
// Serializes ChannelUpdateStatus to a single byte, collapsing the *Staged
// transitional variants onto the last-announced state (see comment below).
// NOTE(review): the `match self {` scrutinee line and closing braces were
// dropped by the extractor.
8410 impl Writeable for ChannelUpdateStatus {
8411 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
8412 // We only care about writing out the current state as it was announced, ie only either
8413 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
8414 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
8416 ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
8417 ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
8418 ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
8419 ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
// Inverse of the Writeable impl above: 0 => Enabled, 1 => Disabled; any other
// byte is rejected as corrupt data.
8425 impl Readable for ChannelUpdateStatus {
8426 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
8427 Ok(match <u8 as Readable>::read(reader)? {
8428 0 => ChannelUpdateStatus::Enabled,
8429 1 => ChannelUpdateStatus::Disabled,
8430 _ => return Err(DecodeError::InvalidValue),
// Serializes AnnouncementSigsState to a single byte as-if just disconnected:
// everything except PeerReceived collapses to NotSent (0). NOTE(review): the
// `match self {` line and closing braces were dropped by the extractor.
8435 impl Writeable for AnnouncementSigsState {
8436 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
8437 // We only care about writing out the current state as if we had just disconnected, at
8438 // which point we always set anything but AnnouncementSigsReceived to NotSent.
8440 AnnouncementSigsState::NotSent => 0u8.write(writer),
8441 AnnouncementSigsState::MessageSent => 0u8.write(writer),
8442 AnnouncementSigsState::Committed => 0u8.write(writer),
8443 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
// Inverse of the Writeable impl above: 0 => NotSent, 1 => PeerReceived; any
// other byte is rejected as corrupt data.
8448 impl Readable for AnnouncementSigsState {
8449 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
8450 Ok(match <u8 as Readable>::read(reader)? {
8451 0 => AnnouncementSigsState::NotSent,
8452 1 => AnnouncementSigsState::PeerReceived,
8453 _ => return Err(DecodeError::InvalidValue),
// Full on-disk serialization of a funded Channel. Layout is a fixed legacy
// prefix (versions <= 2 compatible) followed by a TLV suite at the end.
// NOTE(review): this listing embeds original line numbers and has dropped many
// structural lines (match scrutinees, discriminant-byte writes, closing
// braces) wherever the embedded numbering skips — do not treat it as
// compilable; verify details against the unmangled source.
8458 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
8459 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
8460 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
// Only bump to version 4 when an inbound HTLC is still pending resolution, as
// older readers cannot represent `InboundHTLCResolution::Pending`.
8463 let version_to_write = if self.context.pending_inbound_htlcs.iter().any(|htlc| match htlc.state {
8464 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution)|
8465 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => {
8466 matches!(htlc_resolution, InboundHTLCResolution::Pending { .. })
8470 SERIALIZATION_VERSION
8472 MIN_SERIALIZATION_VERSION
8474 write_ver_prefix!(writer, version_to_write, MIN_SERIALIZATION_VERSION);
8476 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
8477 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
8478 // the low bytes now and the optional high bytes later.
8479 let user_id_low = self.context.user_id as u64;
8480 user_id_low.write(writer)?;
8482 // Version 1 deserializers expected to read parts of the config object here. Version 2
8483 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
8484 // `minimum_depth` we simply write dummy values here.
8485 writer.write_all(&[0; 8])?;
8487 self.context.channel_id.write(writer)?;
// Persist the state as-if the peer just disconnected, for ready channels.
8489 let mut channel_state = self.context.channel_state;
8490 if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
8491 channel_state.set_peer_disconnected();
8493 debug_assert!(false, "Pre-funded/shutdown channels should not be written");
8495 channel_state.to_u32().write(writer)?;
8497 self.context.channel_value_satoshis.write(writer)?;
8499 self.context.latest_monitor_update_id.write(writer)?;
8501 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
8502 // deserialized from that format.
8503 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
8504 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
// All-zero bytes stand in for "no legacy shutdown pubkey".
8505 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
8507 self.context.destination_script.write(writer)?;
8509 self.context.cur_holder_commitment_transaction_number.write(writer)?;
8510 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
8511 self.context.value_to_self_msat.write(writer)?;
// Inbound HTLCs still in RemoteAnnounced (never committed) are dropped from
// the serialized set — the peer will re-announce them on reconnect.
8513 let mut dropped_inbound_htlcs = 0;
8514 for htlc in self.context.pending_inbound_htlcs.iter() {
8515 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
8516 dropped_inbound_htlcs += 1;
8519 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
8520 for htlc in self.context.pending_inbound_htlcs.iter() {
8521 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
8524 htlc.htlc_id.write(writer)?;
8525 htlc.amount_msat.write(writer)?;
8526 htlc.cltv_expiry.write(writer)?;
8527 htlc.payment_hash.write(writer)?;
8529 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
8530 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution) => {
// Old (<=3) format stores the resolved PendingHTLCStatus inline; version 4
// stores the full InboundHTLCResolution instead.
8532 if version_to_write <= 3 {
8533 if let InboundHTLCResolution::Resolved { pending_htlc_status } = htlc_resolution {
8534 pending_htlc_status.write(writer)?;
8539 htlc_resolution.write(writer)?;
8542 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => {
8544 if version_to_write <= 3 {
8545 if let InboundHTLCResolution::Resolved { pending_htlc_status } = htlc_resolution {
8546 pending_htlc_status.write(writer)?;
8551 htlc_resolution.write(writer)?;
8554 &InboundHTLCState::Committed => {
8557 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
8559 removal_reason.write(writer)?;
// Side vectors collected during the outbound-HTLC pass and emitted later as
// TLVs (preimages, skimmed fees, blinding points).
8564 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
8565 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
8566 let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
8568 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
8569 for htlc in self.context.pending_outbound_htlcs.iter() {
8570 htlc.htlc_id.write(writer)?;
8571 htlc.amount_msat.write(writer)?;
8572 htlc.cltv_expiry.write(writer)?;
8573 htlc.payment_hash.write(writer)?;
8574 htlc.source.write(writer)?;
8576 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
8578 onion_packet.write(writer)?;
8580 &OutboundHTLCState::Committed => {
8583 &OutboundHTLCState::RemoteRemoved(_) => {
8584 // Treat this as a Committed because we haven't received the CS - they'll
8585 // resend the claim/fail on reconnect as we all (hopefully) the missing CS.
8588 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
8590 if let OutboundHTLCOutcome::Success(preimage) = outcome {
8591 preimages.push(preimage);
8593 let reason: Option<&HTLCFailReason> = outcome.into();
8594 reason.write(writer)?;
8596 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
8598 if let OutboundHTLCOutcome::Success(preimage) = outcome {
8599 preimages.push(preimage);
8601 let reason: Option<&HTLCFailReason> = outcome.into();
8602 reason.write(writer)?;
8605 pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat);
8606 pending_outbound_blinding_points.push(htlc.blinding_point);
// Same pattern for the holding cell: per-update side data goes in TLVs.
8609 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
8610 let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
8611 // Vec of (htlc_id, failure_code, sha256_of_onion)
8612 let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new();
8613 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
8614 for update in self.context.holding_cell_htlc_updates.iter() {
8616 &HTLCUpdateAwaitingACK::AddHTLC {
8617 ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
8618 blinding_point, skimmed_fee_msat,
8621 amount_msat.write(writer)?;
8622 cltv_expiry.write(writer)?;
8623 payment_hash.write(writer)?;
8624 source.write(writer)?;
8625 onion_routing_packet.write(writer)?;
8627 holding_cell_skimmed_fees.push(skimmed_fee_msat);
8628 holding_cell_blinding_points.push(blinding_point);
8630 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
8632 payment_preimage.write(writer)?;
8633 htlc_id.write(writer)?;
8635 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
8637 htlc_id.write(writer)?;
8638 err_packet.write(writer)?;
8640 &HTLCUpdateAwaitingACK::FailMalformedHTLC {
8641 htlc_id, failure_code, sha256_of_onion
8643 // We don't want to break downgrading by adding a new variant, so write a dummy
8644 // `::FailHTLC` variant and write the real malformed error as an optional TLV.
8645 malformed_htlcs.push((htlc_id, failure_code, sha256_of_onion));
8647 let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
8649 htlc_id.write(writer)?;
8650 dummy_err_packet.write(writer)?;
8655 match self.context.resend_order {
8656 RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
8657 RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
// Monitor-pending flags and queues: replayed after a monitor update completes.
8660 self.context.monitor_pending_channel_ready.write(writer)?;
8661 self.context.monitor_pending_revoke_and_ack.write(writer)?;
8662 self.context.monitor_pending_commitment_signed.write(writer)?;
8664 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
8665 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
8666 pending_forward.write(writer)?;
8667 htlc_id.write(writer)?;
8670 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
8671 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
8672 htlc_source.write(writer)?;
8673 payment_hash.write(writer)?;
8674 fail_reason.write(writer)?;
// Outbound channels always persist a pending fee update; inbound ones only if
// it was already committed (not merely announced).
8677 if self.context.is_outbound() {
8678 self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
8679 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
8680 Some(feerate).write(writer)?;
8682 // As for inbound HTLCs, if the update was only announced and never committed in a
8683 // commitment_signed, drop it.
8684 None::<u32>.write(writer)?;
8686 self.context.holding_cell_update_fee.write(writer)?;
8688 self.context.next_holder_htlc_id.write(writer)?;
// Deduct the dropped RemoteAnnounced HTLCs counted above so ids stay in sync.
8689 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
8690 self.context.update_time_counter.write(writer)?;
8691 self.context.feerate_per_kw.write(writer)?;
8693 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
8694 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
8695 // `last_send_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
8696 // consider the stale state on reload.
8699 self.context.funding_tx_confirmed_in.write(writer)?;
8700 self.context.funding_tx_confirmation_height.write(writer)?;
8701 self.context.short_channel_id.write(writer)?;
8703 self.context.counterparty_dust_limit_satoshis.write(writer)?;
8704 self.context.holder_dust_limit_satoshis.write(writer)?;
8705 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
8707 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
8708 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
8710 self.context.counterparty_htlc_minimum_msat.write(writer)?;
8711 self.context.holder_htlc_minimum_msat.write(writer)?;
8712 self.context.counterparty_max_accepted_htlcs.write(writer)?;
8714 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
8715 self.context.minimum_depth.unwrap_or(0).write(writer)?;
8717 match &self.context.counterparty_forwarding_info {
8720 info.fee_base_msat.write(writer)?;
8721 info.fee_proportional_millionths.write(writer)?;
8722 info.cltv_expiry_delta.write(writer)?;
// 0 tag byte means "no counterparty forwarding info".
8724 None => 0u8.write(writer)?
8727 self.context.channel_transaction_parameters.write(writer)?;
8728 self.context.funding_transaction.write(writer)?;
8730 self.context.counterparty_cur_commitment_point.write(writer)?;
8731 self.context.counterparty_prev_commitment_point.write(writer)?;
8732 self.context.counterparty_node_id.write(writer)?;
8734 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
8736 self.context.commitment_secrets.write(writer)?;
8738 self.context.channel_update_status.write(writer)?;
8740 #[cfg(any(test, fuzzing))]
8741 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
8742 #[cfg(any(test, fuzzing))]
8743 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
8744 htlc.write(writer)?;
8747 // If the channel type is something other than only-static-remote-key, then we need to have
8748 // older clients fail to deserialize this channel at all. If the type is
8749 // only-static-remote-key, we simply consider it "default" and don't write the channel type
8751 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
8752 Some(&self.context.channel_type) } else { None };
8754 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
8755 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
8756 // a different percentage of the channel value then 10%, which older versions of LDK used
8757 // to set it to before the percentage was made configurable.
8758 let serialized_holder_selected_reserve =
8759 if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
8760 { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
8762 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
8763 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
8764 let serialized_holder_htlc_max_in_flight =
8765 if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
8766 { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
8768 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
8769 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
8771 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
8772 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
8773 // we write the high bytes as an option here.
8774 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
8776 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
8778 let mut monitor_pending_update_adds = None;
8779 if !self.context.monitor_pending_update_adds.is_empty() {
8780 monitor_pending_update_adds = Some(&self.context.monitor_pending_update_adds);
// Trailing TLV suite; odd types are optional for older readers, even types
// (e.g. 2: chan_type) force old readers to fail rather than misinterpret.
8783 write_tlv_fields!(writer, {
8784 (0, self.context.announcement_sigs, option),
8785 // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
8786 // default value instead of being Option<>al. Thus, to maintain compatibility we write
8787 // them twice, once with their original default values above, and once as an option
8788 // here. On the read side, old versions will simply ignore the odd-type entries here,
8789 // and new versions map the default values to None and allow the TLV entries here to
8791 (1, self.context.minimum_depth, option),
8792 (2, chan_type, option),
8793 (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
8794 (4, serialized_holder_selected_reserve, option),
8795 (5, self.context.config, required),
8796 (6, serialized_holder_htlc_max_in_flight, option),
8797 (7, self.context.shutdown_scriptpubkey, option),
8798 (8, self.context.blocked_monitor_updates, optional_vec),
8799 (9, self.context.target_closing_feerate_sats_per_kw, option),
8800 (10, monitor_pending_update_adds, option), // Added in 0.0.122
8801 (11, self.context.monitor_pending_finalized_fulfills, required_vec),
8802 (13, self.context.channel_creation_height, required),
8803 (15, preimages, required_vec),
8804 (17, self.context.announcement_sigs_state, required),
8805 (19, self.context.latest_inbound_scid_alias, option),
8806 (21, self.context.outbound_scid_alias, required),
8807 (23, channel_ready_event_emitted, option),
8808 (25, user_id_high_opt, option),
8809 (27, self.context.channel_keys_id, required),
8810 (28, holder_max_accepted_htlcs, option),
8811 (29, self.context.temporary_channel_id, option),
8812 (31, channel_pending_event_emitted, option),
8813 (35, pending_outbound_skimmed_fees, optional_vec),
8814 (37, holding_cell_skimmed_fees, optional_vec),
8815 (38, self.context.is_batch_funding, option),
8816 (39, pending_outbound_blinding_points, optional_vec),
8817 (41, holding_cell_blinding_points, optional_vec),
8818 (43, malformed_htlcs, optional_vec), // Added in 0.0.119
8819 // 45 and 47 are reserved for async signing
8820 (49, self.context.local_initiated_shutdown, option), // Added in 0.0.122
8827 const MAX_ALLOC_SIZE: usize = 64*1024;
// Deserializes a `Channel` previously written with `Writeable`. Args are
// (entropy_source, signer_provider, serialized_block_height, our_supported_features).
// The fixed-format prefix below MUST be read in exactly the order it was written;
// newer/optional fields arrive via the TLV stream near the end of the stream.
// NOTE(review): this excerpt appears to have some original lines elided (several
// match-arm literals and closing braces are not visible here).
8828 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
8830 ES::Target: EntropySource,
8831 SP::Target: SignerProvider
8833 fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
8834 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
// `ver` gates version-dependent decoding below (e.g. InboundHTLCResolution for ver <= 3).
8835 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
8837 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
8838 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
8839 // the low bytes now and the high bytes later.
8840 let user_id_low: u64 = Readable::read(reader)?;
// Default config; may be overwritten wholesale by TLV type 5 below.
8842 let mut config = Some(LegacyChannelConfig::default());
8844 // Read the old serialization of the ChannelConfig from version 0.0.98.
8845 config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
8846 config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
8847 config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
8848 config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
8850 // Read the 8 bytes of backwards-compatibility ChannelConfig data.
8851 let mut _val: u64 = Readable::read(reader)?;
8854 let channel_id = Readable::read(reader)?;
// Reject unparseable channel-state bitfields rather than guessing at a state.
8855 let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?;
8856 let channel_value_satoshis = Readable::read(reader)?;
8858 let latest_monitor_update_id = Readable::read(reader)?;
8860 let mut keys_data = None;
8862 // Read the serialize signer bytes. We'll choose to deserialize them or not based on whether
8863 // the `channel_keys_id` TLV is present below.
8864 let keys_len: u32 = Readable::read(reader)?;
// Capacity is capped at MAX_ALLOC_SIZE so a corrupted length can't trigger a huge allocation.
8865 keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
8866 while keys_data.as_ref().unwrap().len() != keys_len as usize {
8867 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
8868 let mut data = [0; 1024];
8869 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
8870 reader.read_exact(read_slice)?;
8871 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
8875 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
8876 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
8877 Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
8880 let destination_script = Readable::read(reader)?;
8882 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
8883 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
8884 let value_to_self_msat = Readable::read(reader)?;
// Pending inbound HTLCs: length-prefixed list, each entry's state is tagged by a u8.
8886 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
8888 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
8889 for _ in 0..pending_inbound_htlc_count {
8890 pending_inbound_htlcs.push(InboundHTLCOutput {
8891 htlc_id: Readable::read(reader)?,
8892 amount_msat: Readable::read(reader)?,
8893 cltv_expiry: Readable::read(reader)?,
8894 payment_hash: Readable::read(reader)?,
8895 state: match <u8 as Readable>::read(reader)? {
// Serialization version <= 3 wrote a bare PendingHTLCStatus here; newer versions
// write a full InboundHTLCResolution.
8897 let resolution = if ver <= 3 {
8898 InboundHTLCResolution::Resolved { pending_htlc_status: Readable::read(reader)? }
8900 Readable::read(reader)?
8902 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution)
8905 let resolution = if ver <= 3 {
8906 InboundHTLCResolution::Resolved { pending_htlc_status: Readable::read(reader)? }
8908 Readable::read(reader)?
8910 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution)
8912 3 => InboundHTLCState::Committed,
8913 4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
8914 _ => return Err(DecodeError::InvalidValue),
// Pending outbound HTLCs. `skimmed_fee_msat`/`blinding_point` are not part of this
// legacy format; they are back-filled from TLVs 35/39 further down.
8919 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
8920 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
8921 for _ in 0..pending_outbound_htlc_count {
8922 pending_outbound_htlcs.push(OutboundHTLCOutput {
8923 htlc_id: Readable::read(reader)?,
8924 amount_msat: Readable::read(reader)?,
8925 cltv_expiry: Readable::read(reader)?,
8926 payment_hash: Readable::read(reader)?,
8927 source: Readable::read(reader)?,
8928 state: match <u8 as Readable>::read(reader)? {
8929 0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
8930 1 => OutboundHTLCState::Committed,
8932 let option: Option<HTLCFailReason> = Readable::read(reader)?;
8933 OutboundHTLCState::RemoteRemoved(option.into())
8936 let option: Option<HTLCFailReason> = Readable::read(reader)?;
8937 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
8940 let option: Option<HTLCFailReason> = Readable::read(reader)?;
8941 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
8943 _ => return Err(DecodeError::InvalidValue),
8945 skimmed_fee_msat: None,
8946 blinding_point: None,
// Holding-cell updates queued while a monitor update was in flight, tag-dispatched per entry.
8950 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
8951 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
8952 for _ in 0..holding_cell_htlc_update_count {
8953 holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
8954 0 => HTLCUpdateAwaitingACK::AddHTLC {
8955 amount_msat: Readable::read(reader)?,
8956 cltv_expiry: Readable::read(reader)?,
8957 payment_hash: Readable::read(reader)?,
8958 source: Readable::read(reader)?,
8959 onion_routing_packet: Readable::read(reader)?,
8960 skimmed_fee_msat: None,
8961 blinding_point: None,
8963 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
8964 payment_preimage: Readable::read(reader)?,
8965 htlc_id: Readable::read(reader)?,
8967 2 => HTLCUpdateAwaitingACK::FailHTLC {
8968 htlc_id: Readable::read(reader)?,
8969 err_packet: Readable::read(reader)?,
8971 _ => return Err(DecodeError::InvalidValue),
8975 let resend_order = match <u8 as Readable>::read(reader)? {
8976 0 => RAACommitmentOrder::CommitmentFirst,
8977 1 => RAACommitmentOrder::RevokeAndACKFirst,
8978 _ => return Err(DecodeError::InvalidValue),
8981 let monitor_pending_channel_ready = Readable::read(reader)?;
8982 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
8983 let monitor_pending_commitment_signed = Readable::read(reader)?;
8985 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
8986 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
8987 for _ in 0..monitor_pending_forwards_count {
8988 monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
8991 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
8992 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
8993 for _ in 0..monitor_pending_failures_count {
8994 monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
// Only the raw feerate is stored; the FeeUpdateState direction is reconstructed below
// once `channel_parameters.is_outbound_from_holder` is known.
8997 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
8999 let holding_cell_update_fee = Readable::read(reader)?;
9001 let next_holder_htlc_id = Readable::read(reader)?;
9002 let next_counterparty_htlc_id = Readable::read(reader)?;
9003 let update_time_counter = Readable::read(reader)?;
9004 let feerate_per_kw = Readable::read(reader)?;
9006 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
9007 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
9008 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
9009 // consider the stale state on reload.
9010 match <u8 as Readable>::read(reader)? {
// Discard the legacy closing-fee fields (feerate, fee, signature) without storing them.
9013 let _: u32 = Readable::read(reader)?;
9014 let _: u64 = Readable::read(reader)?;
9015 let _: Signature = Readable::read(reader)?;
9017 _ => return Err(DecodeError::InvalidValue),
9020 let funding_tx_confirmed_in = Readable::read(reader)?;
9021 let funding_tx_confirmation_height = Readable::read(reader)?;
9022 let short_channel_id = Readable::read(reader)?;
9024 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
9025 let holder_dust_limit_satoshis = Readable::read(reader)?;
9026 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
9027 let mut counterparty_selected_channel_reserve_satoshis = None;
9029 // Read the old serialization from version 0.0.98.
9030 counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
9032 // Read the 8 bytes of backwards-compatibility data.
9033 let _dummy: u64 = Readable::read(reader)?;
9035 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
9036 let holder_htlc_minimum_msat = Readable::read(reader)?;
9037 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
9039 let mut minimum_depth = None;
9041 // Read the old serialization from version 0.0.98.
9042 minimum_depth = Some(Readable::read(reader)?);
9044 // Read the 4 bytes of backwards-compatibility data.
9045 let _dummy: u32 = Readable::read(reader)?;
9048 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
9050 1 => Some(CounterpartyForwardingInfo {
9051 fee_base_msat: Readable::read(reader)?,
9052 fee_proportional_millionths: Readable::read(reader)?,
9053 cltv_expiry_delta: Readable::read(reader)?,
9055 _ => return Err(DecodeError::InvalidValue),
9058 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
9059 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
9061 let counterparty_cur_commitment_point = Readable::read(reader)?;
9063 let counterparty_prev_commitment_point = Readable::read(reader)?;
9064 let counterparty_node_id = Readable::read(reader)?;
9066 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
9067 let commitment_secrets = Readable::read(reader)?;
9069 let channel_update_status = Readable::read(reader)?;
9071 #[cfg(any(test, fuzzing))]
9072 let mut historical_inbound_htlc_fulfills = new_hash_set();
9073 #[cfg(any(test, fuzzing))]
9075 let htlc_fulfills_len: u64 = Readable::read(reader)?;
9076 for _ in 0..htlc_fulfills_len {
// Duplicate entries would indicate a corrupt stream; only asserted in test/fuzzing builds.
9077 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
// Reconstruct the fee-update direction: if we funded the channel we sent the update,
// otherwise it was the counterparty's and is awaiting our revoke-and-ack.
9081 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
9082 Some((feerate, if channel_parameters.is_outbound_from_holder {
9083 FeeUpdateState::Outbound
9085 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
// Defaults for TLV-carried fields. These values apply when the corresponding TLV is
// absent (i.e. the Channel was written by an older version); `read_tlv_fields!` below
// overwrites them when present.
9091 let mut announcement_sigs = None;
9092 let mut target_closing_feerate_sats_per_kw = None;
9093 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
9094 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
9095 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
9096 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
9097 // only, so we default to that if none was written.
9098 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
9099 let mut channel_creation_height = Some(serialized_height);
9100 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
9102 // If we read an old Channel, for simplicity we just treat it as "we never sent an
9103 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
9104 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
9105 let mut latest_inbound_scid_alias = None;
9106 let mut outbound_scid_alias = None;
9107 let mut channel_pending_event_emitted = None;
9108 let mut channel_ready_event_emitted = None;
9110 let mut user_id_high_opt: Option<u64> = None;
9111 let mut channel_keys_id: Option<[u8; 32]> = None;
9112 let mut temporary_channel_id: Option<ChannelId> = None;
9113 let mut holder_max_accepted_htlcs: Option<u16> = None;
9115 let mut blocked_monitor_updates = Some(Vec::new());
9117 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
9118 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
9120 let mut is_batch_funding: Option<()> = None;
9122 let mut local_initiated_shutdown: Option<()> = None;
9124 let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
9125 let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
9127 let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
9128 let mut monitor_pending_update_adds: Option<Vec<msgs::UpdateAddHTLC>> = None;
// TLV stream: even types are required-if-present semantics per LDK's TLV conventions;
// type numbers here must stay in sync with the corresponding write side.
9130 read_tlv_fields!(reader, {
9131 (0, announcement_sigs, option),
9132 (1, minimum_depth, option),
9133 (2, channel_type, option),
9134 (3, counterparty_selected_channel_reserve_satoshis, option),
9135 (4, holder_selected_channel_reserve_satoshis, option),
9136 (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
9137 (6, holder_max_htlc_value_in_flight_msat, option),
9138 (7, shutdown_scriptpubkey, option),
9139 (8, blocked_monitor_updates, optional_vec),
9140 (9, target_closing_feerate_sats_per_kw, option),
9141 (10, monitor_pending_update_adds, option), // Added in 0.0.122
9142 (11, monitor_pending_finalized_fulfills, optional_vec),
9143 (13, channel_creation_height, option),
9144 (15, preimages_opt, optional_vec),
9145 (17, announcement_sigs_state, option),
9146 (19, latest_inbound_scid_alias, option),
9147 (21, outbound_scid_alias, option),
9148 (23, channel_ready_event_emitted, option),
9149 (25, user_id_high_opt, option),
9150 (27, channel_keys_id, option),
9151 (28, holder_max_accepted_htlcs, option),
9152 (29, temporary_channel_id, option),
9153 (31, channel_pending_event_emitted, option),
9154 (35, pending_outbound_skimmed_fees_opt, optional_vec),
9155 (37, holding_cell_skimmed_fees_opt, optional_vec),
9156 (38, is_batch_funding, option),
9157 (39, pending_outbound_blinding_points_opt, optional_vec),
9158 (41, holding_cell_blinding_points_opt, optional_vec),
9159 (43, malformed_htlcs, optional_vec), // Added in 0.0.119
9160 // 45 and 47 are reserved for async signing
9161 (49, local_initiated_shutdown, option),
// Prefer deriving the signer from `channel_keys_id` (modern path); otherwise fall back
// to deserializing the legacy inline signer bytes read earlier.
9164 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
9165 let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
9166 // If we've gotten to the funding stage of the channel, populate the signer with its
9167 // required channel parameters.
9168 if channel_state >= ChannelState::FundingNegotiated {
9169 holder_signer.provide_channel_parameters(&channel_parameters);
9171 (channel_keys_id, holder_signer)
9173 // `keys_data` can be `None` if we had corrupted data.
9174 let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
9175 let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
9176 (holder_signer.channel_keys_id(), holder_signer)
// Back-fill claimed-HTLC preimages (TLV 15) into the matching Success(None) states,
// in order; the counts must match exactly or the stream is invalid.
9179 if let Some(preimages) = preimages_opt {
9180 let mut iter = preimages.into_iter();
9181 for htlc in pending_outbound_htlcs.iter_mut() {
9183 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
9184 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
9186 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
9187 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
9192 // We expect all preimages to be consumed above
9193 if iter.next().is_some() {
9194 return Err(DecodeError::InvalidValue);
9198 let chan_features = channel_type.as_ref().unwrap();
9199 if !chan_features.is_subset(our_supported_features) {
9200 // If the channel was written by a new version and negotiated with features we don't
9201 // understand yet, refuse to read it.
9202 return Err(DecodeError::UnknownRequiredFeature);
9205 // ChannelTransactionParameters may have had an empty features set upon deserialization.
9206 // To account for that, we're proactively setting/overriding the field here.
9207 channel_parameters.channel_type_features = chan_features.clone();
9209 let mut secp_ctx = Secp256k1::new();
9210 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
9212 // `user_id` used to be a single u64 value. In order to remain backwards
9213 // compatible with versions prior to 0.0.113, the u128 is serialized as two
9214 // separate u64 values.
9215 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
9217 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
// The four loops below zip per-HTLC TLV vectors (skimmed fees, blinding points) back
// onto the HTLC lists read earlier; each vector must have exactly one entry per HTLC.
9219 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
9220 let mut iter = skimmed_fees.into_iter();
9221 for htlc in pending_outbound_htlcs.iter_mut() {
9222 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
9224 // We expect all skimmed fees to be consumed above
9225 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
9227 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
9228 let mut iter = skimmed_fees.into_iter();
9229 for htlc in holding_cell_htlc_updates.iter_mut() {
9230 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
9231 *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
9234 // We expect all skimmed fees to be consumed above
9235 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
9237 if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
9238 let mut iter = blinding_pts.into_iter();
9239 for htlc in pending_outbound_htlcs.iter_mut() {
9240 htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
9242 // We expect all blinding points to be consumed above
9243 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
9245 if let Some(blinding_pts) = holding_cell_blinding_points_opt {
9246 let mut iter = blinding_pts.into_iter();
9247 for htlc in holding_cell_htlc_updates.iter_mut() {
9248 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
9249 *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
9252 // We expect all blinding points to be consumed above
9253 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
// Malformed-HTLC failures (TLV 43) were written as placeholder FailHTLC entries with an
// empty err_packet; rewrite each matching entry as a proper FailMalformedHTLC.
9256 if let Some(malformed_htlcs) = malformed_htlcs {
9257 for (malformed_htlc_id, failure_code, sha256_of_onion) in malformed_htlcs {
9258 let htlc_idx = holding_cell_htlc_updates.iter().position(|htlc| {
9259 if let HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet } = htlc {
9260 let matches = *htlc_id == malformed_htlc_id;
9261 if matches { debug_assert!(err_packet.data.is_empty()) }
9264 }).ok_or(DecodeError::InvalidValue)?;
9265 let malformed_htlc = HTLCUpdateAwaitingACK::FailMalformedHTLC {
9266 htlc_id: malformed_htlc_id, failure_code, sha256_of_onion
9268 let _ = core::mem::replace(&mut holding_cell_htlc_updates[htlc_idx], malformed_htlc);
// Assemble the Channel. The `unwrap()`s below are on fields that were either read
// above or given a default before the TLV read, so they are always `Some` here.
9273 context: ChannelContext {
9276 config: config.unwrap(),
9280 // Note that we don't care about serializing handshake limits as we only ever serialize
9281 // channel data after the handshake has completed.
9282 inbound_handshake_limits_override: None,
9285 temporary_channel_id,
9287 announcement_sigs_state: announcement_sigs_state.unwrap(),
9289 channel_value_satoshis,
9291 latest_monitor_update_id,
9293 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
9294 shutdown_scriptpubkey,
9297 cur_holder_commitment_transaction_number,
9298 cur_counterparty_commitment_transaction_number,
9301 holder_max_accepted_htlcs,
9302 pending_inbound_htlcs,
9303 pending_outbound_htlcs,
9304 holding_cell_htlc_updates,
9308 monitor_pending_channel_ready,
9309 monitor_pending_revoke_and_ack,
9310 monitor_pending_commitment_signed,
9311 monitor_pending_forwards,
9312 monitor_pending_failures,
9313 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
9314 monitor_pending_update_adds: monitor_pending_update_adds.unwrap_or(Vec::new()),
// Async-signing flags are transient and never serialized; start fresh on reload.
9316 signer_pending_commitment_update: false,
9317 signer_pending_funding: false,
9320 holding_cell_update_fee,
9321 next_holder_htlc_id,
9322 next_counterparty_htlc_id,
9323 update_time_counter,
9326 #[cfg(debug_assertions)]
9327 holder_max_commitment_tx_output: Mutex::new((0, 0)),
9328 #[cfg(debug_assertions)]
9329 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
// Closing-negotiation state is deliberately reset on reload (see comment above where
// the legacy closing-fee fields are discarded).
9331 last_sent_closing_fee: None,
9332 pending_counterparty_closing_signed: None,
9333 expecting_peer_commitment_signed: false,
9334 closing_fee_limits: None,
9335 target_closing_feerate_sats_per_kw,
9337 funding_tx_confirmed_in,
9338 funding_tx_confirmation_height,
9340 channel_creation_height: channel_creation_height.unwrap(),
9342 counterparty_dust_limit_satoshis,
9343 holder_dust_limit_satoshis,
9344 counterparty_max_htlc_value_in_flight_msat,
9345 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
9346 counterparty_selected_channel_reserve_satoshis,
9347 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
9348 counterparty_htlc_minimum_msat,
9349 holder_htlc_minimum_msat,
9350 counterparty_max_accepted_htlcs,
9353 counterparty_forwarding_info,
9355 channel_transaction_parameters: channel_parameters,
9356 funding_transaction,
9359 counterparty_cur_commitment_point,
9360 counterparty_prev_commitment_point,
9361 counterparty_node_id,
9363 counterparty_shutdown_scriptpubkey,
9367 channel_update_status,
9368 closing_signed_in_flight: false,
9372 #[cfg(any(test, fuzzing))]
9373 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
9374 #[cfg(any(test, fuzzing))]
9375 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
9377 workaround_lnd_bug_4006: None,
9378 sent_message_awaiting_response: None,
9380 latest_inbound_scid_alias,
9381 // Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if it's missing
9382 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
// Default to true for channels written before these event flags existed, so we don't
// re-emit events for old channels.
9384 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
9385 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
9387 #[cfg(any(test, fuzzing))]
9388 historical_inbound_htlc_fulfills,
9390 channel_type: channel_type.unwrap(),
9393 local_initiated_shutdown,
9395 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
9397 #[cfg(any(dual_funding, splicing))]
9398 dual_funding_channel_context: None,
9406 use bitcoin::amount::Amount;
9407 use bitcoin::blockdata::constants::ChainHash;
9408 use bitcoin::blockdata::script::{ScriptBuf, Builder};
9409 use bitcoin::blockdata::transaction::{Transaction, TxOut, Version};
9410 use bitcoin::blockdata::opcodes;
9411 use bitcoin::network::Network;
9412 use crate::ln::onion_utils::INVALID_ONION_BLINDING;
9413 use crate::ln::types::{PaymentHash, PaymentPreimage};
9414 use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
9415 use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
9416 use crate::ln::channel::InitFeatures;
9417 use crate::ln::channel::{AwaitingChannelReadyFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
9418 use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
9419 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
9420 use crate::ln::msgs;
9421 use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
9422 use crate::ln::script::ShutdownScript;
9423 use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
9424 use crate::chain::BestBlock;
9425 use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
9426 use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
9427 use crate::chain::transaction::OutPoint;
9428 use crate::routing::router::{Path, RouteHop};
9429 use crate::util::config::UserConfig;
9430 use crate::util::errors::APIError;
9431 use crate::util::ser::{ReadableArgs, Writeable};
9432 use crate::util::test_utils;
9433 use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
9434 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
9435 use bitcoin::secp256k1::ffi::Signature as FFISignature;
9436 use bitcoin::secp256k1::{SecretKey,PublicKey};
9437 use bitcoin::hashes::sha256::Hash as Sha256;
9438 use bitcoin::hashes::Hash;
9439 use bitcoin::hashes::hex::FromHex;
9440 use bitcoin::blockdata::locktime::absolute::LockTime;
9441 use bitcoin::{WitnessProgram, WitnessVersion, WPubkeyHash};
9442 use crate::prelude::*;
// Verifies the derived ordering of `ChannelState`: states must compare in channel
// lifecycle order (NegotiatingFunding < FundingNegotiated < AwaitingChannelReady <
// ChannelReady < ShutdownComplete), since code such as the deserializer's
// `channel_state >= ChannelState::FundingNegotiated` check relies on it.
9445 fn test_channel_state_order() {
9446 use crate::ln::channel::NegotiatingFundingFlags;
9447 use crate::ln::channel::AwaitingChannelReadyFlags;
9448 use crate::ln::channel::ChannelReadyFlags;
9450 assert!(ChannelState::NegotiatingFunding(NegotiatingFundingFlags::new()) < ChannelState::FundingNegotiated);
9451 assert!(ChannelState::FundingNegotiated < ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()));
9452 assert!(ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()) < ChannelState::ChannelReady(ChannelReadyFlags::new()));
9453 assert!(ChannelState::ChannelReady(ChannelReadyFlags::new()) < ChannelState::ShutdownComplete)
// Minimal fee estimator for tests: returns a fixed sat-per-1000-weight value
// regardless of the requested confirmation target.
// NOTE(review): the struct field and the function body are not visible in this excerpt.
9456 struct TestFeeEstimator {
9459 impl FeeEstimator for TestFeeEstimator {
9460 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
// Sanity-checks the funding-amount constants: the non-wumbo funding cap must not
// exceed the total bitcoin supply in satoshis.
9466 fn test_max_funding_satoshis_no_wumbo() {
9467 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
9468 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
9469 "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
// Test-only keys backend wrapping a single `InMemorySigner`: deterministic entropy
// (all zeros) and a `SignerProvider` that hands the same signer/keys id to every
// channel. `read_chan_signer` panics as these tests never deserialize signers.
// NOTE(review): the enclosing `struct Keys {` header and some method bodies are not
// visible in this excerpt.
9473 signer: InMemorySigner,
9476 impl EntropySource for Keys {
9477 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
9480 impl SignerProvider for Keys {
9481 type EcdsaSigner = InMemorySigner;
9483 type TaprootSigner = InMemorySigner;
9485 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
9486 self.signer.channel_keys_id()
9489 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
9493 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
// Builds a fixed P2WPKH destination script from a hard-coded test key.
9495 fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
9496 let secp_ctx = Secp256k1::signing_only();
9497 let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
9498 let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
9499 Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
// Builds a fixed P2WPKH shutdown script from the same hard-coded test key.
9502 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
9503 let secp_ctx = Secp256k1::signing_only();
9504 let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
9505 Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
// Test-vector helper: derives the public key for a hex-encoded secret key.
// Only compiled for the `_test_vectors` configuration (without `grind_signatures`).
9509 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
9510 fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
9511 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
// Verifies that opening a channel fails with `APIError::IncompatibleShutdownScript`
// when the keys interface supplies a non-v0-segwit shutdown script while the peer's
// features have `shutdown_anysegwit` cleared.
9515 fn upfront_shutdown_script_incompatibility() {
9516 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
// A v16 witness program: valid segwit, but not v0, so the peer can't accept it.
9517 let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
9518 &WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
9521 let seed = [42; 32];
9522 let network = Network::Testnet;
9523 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
// Force the keys interface to hand back the incompatible script on the next request.
9524 keys_provider.expect(OnGetShutdownScriptpubkey {
9525 returns: non_v0_segwit_shutdown_script.clone(),
9528 let secp_ctx = Secp256k1::new();
9529 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9530 let config = UserConfig::default();
9531 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
9532 Err(APIError::IncompatibleShutdownScript { script }) => {
9533 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
9535 Err(e) => panic!("Unexpected error: {:?}", e),
9536 Ok(_) => panic!("Expected error"),
9540 // Check that, during channel creation, we use the same feerate in the open channel message
9541 // as we do in the Channel object creation itself.
9543 fn test_open_channel_msg_fee() {
9544 let original_fee = 253;
9545 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
9546 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
9547 let secp_ctx = Secp256k1::new();
9548 let seed = [42; 32];
9549 let network = Network::Testnet;
9550 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9552 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9553 let config = UserConfig::default();
// Channel is created while the estimator still returns `original_fee`...
9554 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9556 // Now change the fee so we can check that the fee in the open_channel message is the
9557 // same as the old fee.
9558 fee_est.fee_est = 500;
// ...so the open_channel message must carry the feerate captured at creation time,
// not the estimator's current value.
9559 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9560 assert_eq!(open_channel_msg.common_fields.commitment_feerate_sat_per_1000_weight, original_fee);
// NOTE(review): this test continues past the end of this excerpt; comments below cover
// only the visible portion.
9564 fn test_holder_vs_counterparty_dust_limit() {
9565 // Test that when calculating the local and remote commitment transaction fees, the correct
9566 // dust limits are used.
9567 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9568 let secp_ctx = Secp256k1::new();
9569 let seed = [42; 32];
9570 let network = Network::Testnet;
9571 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9572 let logger = test_utils::TestLogger::new();
9573 let best_block = BestBlock::from_network(network);
9575 // Go through the flow of opening a channel between two nodes, making sure
9576 // they have different dust limits.
9578 // Create Node A's channel pointing to Node B's pubkey
9579 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9580 let config = UserConfig::default();
9581 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9583 // Create Node B's channel by receiving Node A's open_channel message
9584 // Make sure A's dust limit is as we expect.
9585 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9586 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9587 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
9589 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
9590 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
9591 accept_channel_msg.common_fields.dust_limit_satoshis = 546;
9592 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
// Override A's own dust limit so that A's and B's limits straddle the HTLC amount below.
9593 node_a_chan.context.holder_dust_limit_satoshis = 1560;
9595 // Node A --> Node B: funding created
9596 let output_script = node_a_chan.context.get_funding_redeemscript();
9597 let tx = Transaction { version: Version::ONE, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
9598 value: Amount::from_sat(10000000), script_pubkey: output_script.clone(),
9600 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9601 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
9602 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
9604 // Node B --> Node A: funding signed
9605 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
9606 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
9608 // Put some inbound and outbound HTLCs in A's channel.
9609 let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
9610 node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
9612 amount_msat: htlc_amount_msat,
9613 payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
9614 cltv_expiry: 300000000,
9615 state: InboundHTLCState::Committed,
9618 node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
9620 amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
9621 payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
9622 cltv_expiry: 200000000,
9623 state: OutboundHTLCState::Committed,
9624 source: HTLCSource::OutboundRoute {
9625 path: Path { hops: Vec::new(), blinded_tail: None },
9626 session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
9627 first_hop_htlc_msat: 548,
9628 payment_id: PaymentId([42; 32]),
9630 skimmed_fee_msat: None,
9631 blinding_point: None,
9634 // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
9635 // the dust limit check.
9636 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
9637 let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
// All HTLCs are dust on A's local commitment, so the fee equals the 0-HTLC fee.
9638 let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
9639 assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
9641 // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
9642 // of the HTLCs are seen to be above the dust limit.
9643 node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
9644 let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
9645 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
9646 let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
9647 assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
9651 fn test_timeout_vs_success_htlc_dust_limit() {
9652 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
9653 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
9654 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
9655 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
// Per BOLT 3, an HTLC offered by the commitment holder is resolved via a timeout tx and a
// received HTLC via a success tx; the two second-stage txs have different weights, so the
// effective dust threshold differs by direction. This test pins that direction mapping.
9656 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
9657 let secp_ctx = Secp256k1::new();
9658 let seed = [42; 32];
9659 let network = Network::Testnet;
9660 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9662 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9663 let config = UserConfig::default();
9664 let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
// Baseline commitment fees with zero and one non-dust HTLC — the expected values below.
9666 let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
9667 let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
9669 // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
9670 // counted as dust when it shouldn't be.
// Threshold math: (feerate sat/kW * second-stage weight / 1000) is the second-stage fee in
// sats; adding the dust limit and +/- 1 sat, then * 1000 for msat, lands just above/below
// the real dust cutoff for that HTLC direction.
9671 let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
9672 let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
9673 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
9674 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
9676 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
9677 let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
9678 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
9679 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
9680 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
// Flip to the non-funder perspective and repeat both checks against the *counterparty's*
// dust limit on the remote commitment transaction.
9682 chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
9684 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
9685 let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
9686 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
9687 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
9688 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
9690 // If swapped: this HTLC would be counted as dust when it shouldn't be.
9691 let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
9692 let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
9693 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
9694 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
9698 fn channel_reestablish_no_updates() {
// Runs a full open_channel/accept_channel/funding_created/funding_signed handshake between
// two in-memory channels, then disconnects both sides and checks that each side's
// `channel_reestablish` message carries the expected initial commitment numbers and an
// all-zero "your last per-commitment secret" (no commitments beyond the first exist yet).
9699 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9700 let logger = test_utils::TestLogger::new();
9701 let secp_ctx = Secp256k1::new();
9702 let seed = [42; 32];
9703 let network = Network::Testnet;
9704 let best_block = BestBlock::from_network(network);
9705 let chain_hash = ChainHash::using_genesis_block(network);
9706 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9708 // Go through the flow of opening a channel between two nodes.
9710 // Create Node A's channel pointing to Node B's pubkey
9711 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9712 let config = UserConfig::default();
9713 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9715 // Create Node B's channel by receiving Node A's open_channel message
9716 let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
9717 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9718 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
9720 // Node B --> Node A: accept channel
9721 let accept_channel_msg = node_b_chan.accept_inbound_channel();
9722 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
9724 // Node A --> Node B: funding created
9725 let output_script = node_a_chan.context.get_funding_redeemscript();
9726 let tx = Transaction { version: Version::ONE, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
9727 value: Amount::from_sat(10000000), script_pubkey: output_script.clone(),
9729 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9730 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
9731 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
9733 // Node B --> Node A: funding signed
9734 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
9735 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
9737 // Now disconnect the two nodes and check that the commitment point in
9738 // Node B's channel_reestablish message is sane.
9739 assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
9740 let msg = node_b_chan.get_channel_reestablish(&&logger);
9741 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
9742 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
// No per-commitment secret has been revoked yet, so the echoed secret is all zeroes.
9743 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
9745 // Check that the commitment point in Node A's channel_reestablish message
9747 assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
9748 let msg = node_a_chan.get_channel_reestablish(&&logger);
9749 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
9750 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
9751 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
9755 fn test_configured_holder_max_htlc_value_in_flight() {
// Verifies that both `OutboundV1Channel::new` and `InboundV1Channel::new` derive
// `holder_max_htlc_value_in_flight_msat` from
// `max_inbound_htlc_value_in_flight_percent_of_channel`, clamping configured values to the
// valid [1, 100] percent range.
9756 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9757 let logger = test_utils::TestLogger::new();
9758 let secp_ctx = Secp256k1::new();
9759 let seed = [42; 32];
9760 let network = Network::Testnet;
9761 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9762 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9763 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
// Four configs: two in-range values (2%, 99%) and two out-of-range values (0%, 101%)
// that must be clamped to the 1%/100% bounds respectively.
9765 let mut config_2_percent = UserConfig::default();
9766 config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
9767 let mut config_99_percent = UserConfig::default();
9768 config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
9769 let mut config_0_percent = UserConfig::default();
9770 config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
9771 let mut config_101_percent = UserConfig::default();
9772 config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
9774 // Test that `OutboundV1Channel::new` creates a channel with the correct value for
9775 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
9776 // which is set to the lower bound + 1 (2%) of the `channel_value`.
9777 let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
9778 let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
9779 assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
9781 // Test with the upper bound - 1 of valid values (99%).
9782 let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
9783 let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
9784 assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
// Reuse chan_1's open_channel message to drive all of the inbound-channel cases below.
9786 let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
9788 // Test that `InboundV1Channel::new` creates a channel with the correct value for
9789 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
9790 // which is set to the lower bound + 1 (2%) of the `channel_value`.
9791 let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9792 let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
9793 assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
9795 // Test with the upper bound - 1 of valid values (99%).
9796 let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9797 let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
9798 assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
9800 // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
9801 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
9802 let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
9803 let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
9804 assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
9806 // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
9807 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
9809 let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
9810 let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
9811 assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
9813 // Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
9814 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
9815 let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9816 let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
9817 assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
9819 // Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
9820 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
9822 let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9823 let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
9824 assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
9828 fn test_configured_holder_selected_channel_reserve_satoshis() {
// Driver for `test_self_and_counterparty_channel_reserve(channel_value_sats,
// outbound_reserve_fraction, inbound_reserve_fraction)`, covering valid, clamped-to-minimum,
// and invalid (sum >= 100%) reserve combinations.
9830 // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
9831 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
9832 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
9834 // Test with valid but unreasonably high channel reserves
9835 // Requesting and accepting parties have requested for 49%-49% and 60%-30% channel reserve
9836 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
9837 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
9839 // Test with calculated channel reserve less than lower bound
9840 // i.e `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
9841 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
9843 // Test with invalid channel reserves since sum of both is greater than or equal
9845 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
9846 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
9849 fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
// Helper: opens a channel of `channel_value_satoshis` where the outbound (opening) side asks
// its counterparty to keep `outbound_selected_channel_reserve_perc` of the channel value in
// reserve and the inbound side asks for `inbound_selected_channel_reserve_perc`. Checks that
// each side's `holder_selected_channel_reserve_satoshis` equals its configured fraction,
// floored at `MIN_THEIR_CHAN_RESERVE_SATOSHIS`, and that channel acceptance fails when the
// two fractions sum to 100% or more.
9850 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
9851 let logger = test_utils::TestLogger::new();
9852 let secp_ctx = Secp256k1::new();
9853 let seed = [42; 32];
9854 let network = Network::Testnet;
9855 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9856 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9857 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
// The config field is in millionths, hence the * 1_000_000 conversion from the fraction.
9860 let mut outbound_node_config = UserConfig::default();
9861 outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
9862 let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();
9864 let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
9865 assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
9867 let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
9868 let mut inbound_node_config = UserConfig::default();
9869 inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
9871 if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
// Reserves leave some spendable balance: the inbound side must accept and record both
// its own reserve and the one selected by the opener.
9872 let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();
9874 let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
9876 assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
9877 assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
9879 // Channel Negotiations failed
9880 let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
9881 assert!(result.is_err());
9886 fn channel_update() {
// Opens and funds a channel between two in-memory peers, then feeds Node A a
// `channel_update` from its counterparty and checks that the counterparty forwarding info
// (cltv delta, fee base, fee proportional) is recorded, that our own
// `holder_htlc_minimum_msat` is NOT changed by the counterparty's `htlc_minimum_msat`, and
// that re-applying the identical update reports "no change" (returns false).
9887 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9888 let logger = test_utils::TestLogger::new();
9889 let secp_ctx = Secp256k1::new();
9890 let seed = [42; 32];
9891 let network = Network::Testnet;
9892 let best_block = BestBlock::from_network(network);
9893 let chain_hash = ChainHash::using_genesis_block(network);
9894 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9896 // Create Node A's channel pointing to Node B's pubkey
9897 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9898 let config = UserConfig::default();
9899 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9901 // Create Node B's channel by receiving Node A's open_channel message
9902 // Make sure A's dust limit is as we expect.
9903 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9904 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9905 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
9907 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
9908 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
9909 accept_channel_msg.common_fields.dust_limit_satoshis = 546;
9910 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
9911 node_a_chan.context.holder_dust_limit_satoshis = 1560;
9913 // Node A --> Node B: funding created
9914 let output_script = node_a_chan.context.get_funding_redeemscript();
9915 let tx = Transaction { version: Version::ONE, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
9916 value: Amount::from_sat(10000000), script_pubkey: output_script.clone(),
9918 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9919 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
9920 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
9922 // Node B --> Node A: funding signed
9923 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
9924 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
9926 // Make sure that receiving a channel update will update the Channel as expected.
9927 let update = ChannelUpdate {
9928 contents: UnsignedChannelUpdate {
9930 short_channel_id: 0,
9933 cltv_expiry_delta: 100,
9934 htlc_minimum_msat: 5,
9935 htlc_maximum_msat: MAX_VALUE_MSAT,
9937 fee_proportional_millionths: 11,
9938 excess_data: Vec::new(),
// Dummy signature — the update is still accepted below, so presumably this path does not
// validate the signature; confirm against `Channel::channel_update` if relied upon.
9940 signature: Signature::from(unsafe { FFISignature::new() })
9942 assert!(node_a_chan.channel_update(&update).unwrap());
9944 // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
9945 // change our official htlc_minimum_msat.
9946 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
9947 match node_a_chan.context.counterparty_forwarding_info() {
9949 assert_eq!(info.cltv_expiry_delta, 100);
9950 assert_eq!(info.fee_base_msat, 110);
9951 assert_eq!(info.fee_proportional_millionths, 11);
9953 None => panic!("expected counterparty forwarding info to be Some")
// Applying the exact same update a second time must return false (nothing changed).
9956 assert!(!node_a_chan.channel_update(&update).unwrap());
9960 fn blinding_point_skimmed_fee_malformed_ser() {
9961 // Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized
// properly across a full `Channel` encode/decode round trip.
9963 let logger = test_utils::TestLogger::new();
9964 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9965 let secp_ctx = Secp256k1::new();
9966 let seed = [42; 32];
9967 let network = Network::Testnet;
9968 let best_block = BestBlock::from_network(network);
9969 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
// Run the open/accept/funding handshake to get a funded inbound channel to mutate.
9971 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9972 let config = UserConfig::default();
9973 let features = channelmanager::provided_init_features(&config);
9974 let mut outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(
9975 &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None
9977 let inbound_chan = InboundV1Channel::<&TestKeysInterface>::new(
9978 &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config),
9979 &features, &outbound_chan.get_open_channel(ChainHash::using_genesis_block(network)), 7, &config, 0, &&logger, false
9981 outbound_chan.accept_channel(&inbound_chan.get_accept_channel_message(), &config.channel_handshake_limits, &features).unwrap();
9982 let tx = Transaction { version: Version::ONE, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
9983 value: Amount::from_sat(10000000), script_pubkey: outbound_chan.context.get_funding_redeemscript(),
9985 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9986 let funding_created = outbound_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap().unwrap();
9987 let mut chan = match inbound_chan.funding_created(&funding_created, best_block, &&keys_provider, &&logger) {
9988 Ok((chan, _, _)) => chan,
9989 Err((_, e)) => panic!("{}", e),
// Template HTLC source/output used to build the pending-outbound set below.
9992 let dummy_htlc_source = HTLCSource::OutboundRoute {
9994 hops: vec![RouteHop {
9995 pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
9996 node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
9997 cltv_expiry_delta: 0, maybe_announced_channel: false,
10001 session_priv: test_utils::privkey(42),
10002 first_hop_htlc_msat: 0,
10003 payment_id: PaymentId([42; 32]),
10005 let dummy_outbound_output = OutboundHTLCOutput {
10008 payment_hash: PaymentHash([43; 32]),
10010 state: OutboundHTLCState::Committed,
10011 source: dummy_htlc_source.clone(),
10012 skimmed_fee_msat: None,
10013 blinding_point: None,
// Populate ten pending outbound HTLCs, giving some of them blinding points and some
// skimmed fees so both optional fields get exercised by the round trip.
10015 let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
10016 for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
10018 htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
10021 htlc.skimmed_fee_msat = Some(1);
10024 chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();
// Build one holding-cell update of each variant: AddHTLC (plain and with blinding point +
// skimmed fee), ClaimHTLC, FailHTLC, and FailMalformedHTLC.
10026 let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
10029 payment_hash: PaymentHash([43; 32]),
10030 source: dummy_htlc_source.clone(),
10031 onion_routing_packet: msgs::OnionPacket {
10033 public_key: Ok(test_utils::pubkey(1)),
10034 hop_data: [0; 20*65],
10037 skimmed_fee_msat: None,
10038 blinding_point: None,
10040 let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
10041 payment_preimage: PaymentPreimage([42; 32]),
10044 let dummy_holding_cell_failed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailHTLC {
10045 htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }
10047 let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC {
10048 htlc_id, failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32],
10050 let mut holding_cell_htlc_updates = Vec::with_capacity(12);
10053 holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
10054 } else if i % 5 == 1 {
10055 holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
10056 } else if i % 5 == 2 {
10057 let mut dummy_add = dummy_holding_cell_add_htlc.clone();
10058 if let HTLCUpdateAwaitingACK::AddHTLC {
10059 ref mut blinding_point, ref mut skimmed_fee_msat, ..
10060 } = &mut dummy_add {
10061 *blinding_point = Some(test_utils::pubkey(42 + i));
10062 *skimmed_fee_msat = Some(42);
10063 } else { panic!() }
10064 holding_cell_htlc_updates.push(dummy_add);
10065 } else if i % 5 == 3 {
10066 holding_cell_htlc_updates.push(dummy_holding_cell_malformed_htlc(i as u64));
10068 holding_cell_htlc_updates.push(dummy_holding_cell_failed_htlc(i as u64));
10071 chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();
10073 // Encode and decode the channel and ensure that the HTLCs within are the same.
10074 let encoded_chan = chan.encode();
10075 let mut s = crate::io::Cursor::new(&encoded_chan);
10076 let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
10077 let features = channelmanager::provided_channel_type_features(&config);
10078 let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
// The decoded channel's HTLC state must match what was written in, byte-for-byte.
10079 assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
10080 assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
10083 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
10085 fn outbound_commitment_test() {
10086 use bitcoin::sighash;
10087 use bitcoin::consensus::encode::serialize;
10088 use bitcoin::sighash::EcdsaSighashType;
10089 use bitcoin::hashes::hex::FromHex;
10090 use bitcoin::hash_types::Txid;
10091 use bitcoin::secp256k1::Message;
10092 use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
10093 use crate::ln::PaymentPreimage;
10094 use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
10095 use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
10096 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
10097 use crate::util::logger::Logger;
10098 use crate::sync::Arc;
10099 use core::str::FromStr;
10100 use hex::DisplayHex;
10102 // Test vectors from BOLT 3 Appendices C and F (anchors):
10103 let feeest = TestFeeEstimator{fee_est: 15000};
10104 let logger : Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());
10105 let secp_ctx = Secp256k1::new();
10107 let mut signer = InMemorySigner::new(
10109 SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
10110 SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
10111 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
10112 SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
10113 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
10115 // These aren't set in the test vectors:
10116 [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
10122 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
10123 <Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
10124 let keys_provider = Keys { signer: signer.clone() };
10126 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
10127 let mut config = UserConfig::default();
10128 config.channel_handshake_config.announced_channel = false;
10129 let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
10130 chan.context.holder_dust_limit_satoshis = 546;
10131 chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
10133 let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
10135 let counterparty_pubkeys = ChannelPublicKeys {
10136 funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
10137 revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
10138 payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
10139 delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
10140 htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
10142 chan.context.channel_transaction_parameters.counterparty_parameters = Some(
10143 CounterpartyChannelTransactionParameters {
10144 pubkeys: counterparty_pubkeys.clone(),
10145 selected_contest_delay: 144
10147 chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
10148 signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
10150 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
10151 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
10153 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
10154 <Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
10156 assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
10157 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
10159 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
10160 // derived from a commitment_seed, so instead we copy it here and call
10161 // build_commitment_transaction.
10162 let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
10163 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
10164 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
10165 let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
10166 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
10168 macro_rules! test_commitment {
10169 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
10170 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
10171 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
10175 macro_rules! test_commitment_with_anchors {
10176 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
10177 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10178 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
10182 macro_rules! test_commitment_common {
10183 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
10184 $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
10186 let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
10187 let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
10189 let htlcs = commitment_stats.htlcs_included.drain(..)
10190 .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
10192 (commitment_stats.tx, htlcs)
10194 let trusted_tx = commitment_tx.trust();
10195 let unsigned_tx = trusted_tx.built_transaction();
10196 let redeemscript = chan.context.get_funding_redeemscript();
10197 let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
10198 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
10199 log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
10200 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
10202 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
10203 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
10204 let mut counterparty_htlc_sigs = Vec::new();
10205 counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
10207 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
10208 per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
10209 counterparty_htlc_sigs.push(remote_signature);
10211 assert_eq!(htlcs.len(), per_htlc.len());
10213 let holder_commitment_tx = HolderCommitmentTransaction::new(
10214 commitment_tx.clone(),
10215 counterparty_signature,
10216 counterparty_htlc_sigs,
10217 &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
10218 chan.context.counterparty_funding_pubkey()
10220 let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
10221 assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
10223 let funding_redeemscript = chan.context.get_funding_redeemscript();
10224 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
10225 assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");
10227 // ((htlc, counterparty_sig), (index, holder_sig))
10228 let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();
10231 log_trace!(logger, "verifying htlc {}", $htlc_idx);
10232 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
10234 let ref htlc = htlcs[$htlc_idx];
10235 let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
10236 chan.context.get_counterparty_selected_contest_delay().unwrap(),
10237 &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
10238 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
10239 let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
10240 let htlc_sighash = Message::from_digest(sighash::SighashCache::new(&htlc_tx).p2wsh_signature_hash(0, &htlc_redeemscript, htlc.to_bitcoin_amount(), htlc_sighashtype).unwrap().as_raw_hash().to_byte_array());
10241 assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");
10243 let mut preimage: Option<PaymentPreimage> = None;
10246 let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
10247 if out == htlc.payment_hash {
10248 preimage = Some(PaymentPreimage([i; 32]));
10252 assert!(preimage.is_some());
10255 let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
10256 let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
10257 channel_derivation_parameters: ChannelDerivationParameters {
10258 value_satoshis: chan.context.channel_value_satoshis,
10259 keys_id: chan.context.channel_keys_id,
10260 transaction_parameters: chan.context.channel_transaction_parameters.clone(),
10262 commitment_txid: trusted_tx.txid(),
10263 per_commitment_number: trusted_tx.commitment_number(),
10264 per_commitment_point: trusted_tx.per_commitment_point(),
10265 feerate_per_kw: trusted_tx.feerate_per_kw(),
10266 htlc: htlc.clone(),
10267 preimage: preimage.clone(),
10268 counterparty_sig: *htlc_counterparty_sig,
10269 }, &secp_ctx).unwrap();
10270 let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
10271 assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
10273 let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
10274 assert_eq!(signature, htlc_holder_sig, "htlc sig");
10275 let trusted_tx = holder_commitment_tx.trust();
10276 htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
10277 log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
10278 assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
10280 assert!(htlc_counterparty_sig_iter.next().is_none());
10284 // anchors: simple commitment tx with no HTLCs and single anchor
10285 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
10286 "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
10287 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10289 // simple commitment tx with no HTLCs
10290 chan.context.value_to_self_msat = 7000000000;
10292 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
10293 "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
10294 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10296 // anchors: simple commitment tx with no HTLCs
10297 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
10298 "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
10299 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10301 chan.context.pending_inbound_htlcs.push({
10302 let mut out = InboundHTLCOutput{
10304 amount_msat: 1000000,
10306 payment_hash: PaymentHash([0; 32]),
10307 state: InboundHTLCState::Committed,
10309 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
10312 chan.context.pending_inbound_htlcs.push({
10313 let mut out = InboundHTLCOutput{
10315 amount_msat: 2000000,
10317 payment_hash: PaymentHash([0; 32]),
10318 state: InboundHTLCState::Committed,
10320 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
10323 chan.context.pending_outbound_htlcs.push({
10324 let mut out = OutboundHTLCOutput{
10326 amount_msat: 2000000,
10328 payment_hash: PaymentHash([0; 32]),
10329 state: OutboundHTLCState::Committed,
10330 source: HTLCSource::dummy(),
10331 skimmed_fee_msat: None,
10332 blinding_point: None,
10334 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
10337 chan.context.pending_outbound_htlcs.push({
10338 let mut out = OutboundHTLCOutput{
10340 amount_msat: 3000000,
10342 payment_hash: PaymentHash([0; 32]),
10343 state: OutboundHTLCState::Committed,
10344 source: HTLCSource::dummy(),
10345 skimmed_fee_msat: None,
10346 blinding_point: None,
10348 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
10351 chan.context.pending_inbound_htlcs.push({
10352 let mut out = InboundHTLCOutput{
10354 amount_msat: 4000000,
10356 payment_hash: PaymentHash([0; 32]),
10357 state: InboundHTLCState::Committed,
10359 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
10363 // commitment tx with all five HTLCs untrimmed (minimum feerate)
10364 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10365 chan.context.feerate_per_kw = 0;
10367 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
10368 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
10369 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10372 "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
10373 "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
10374 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
10377 "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
10378 "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
10379 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10382 "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
10383 "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
10384 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
10387 "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
10388 "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
10389 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10392 "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
10393 "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
10394 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10397 // commitment tx with seven outputs untrimmed (maximum feerate)
10398 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10399 chan.context.feerate_per_kw = 647;
10401 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
10402 "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
10403 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10406 "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
10407 "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
10408 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
10411 "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
10412 "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
10413 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10416 "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
10417 "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
10418 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
10421 "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
10422 "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
10423 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10426 "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
10427 "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
10428 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10431 // commitment tx with six outputs untrimmed (minimum feerate)
10432 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10433 chan.context.feerate_per_kw = 648;
10435 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
10436 "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
10437 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10440 "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
10441 "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
10442 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10445 "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
10446 "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
10447 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
10450 "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
10451 "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
10452 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10455 "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
10456 "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
10457 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10460 // anchors: commitment tx with six outputs untrimmed (minimum dust limit)
10461 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10462 chan.context.feerate_per_kw = 645;
10463 chan.context.holder_dust_limit_satoshis = 1001;
10465 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
10466 "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
10467 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10470 "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
10471 "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
10472 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
10475 "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
10476 "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
10477 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
10480 "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
10481 "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
10482 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
10485 "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
10486 "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
10487 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
10490 // commitment tx with six outputs untrimmed (maximum feerate)
10491 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10492 chan.context.feerate_per_kw = 2069;
10493 chan.context.holder_dust_limit_satoshis = 546;
10495 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
10496 "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
10497 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10500 "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
10501 "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
10502 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10505 "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
10506 "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
10507 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
10510 "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
10511 "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
10512 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10515 "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
10516 "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
10517 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10520 // commitment tx with five outputs untrimmed (minimum feerate)
10521 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10522 chan.context.feerate_per_kw = 2070;
10524 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
10525 "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
10526 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10529 "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
10530 "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
10531 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10534 "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
10535 "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
10536 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10539 "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
10540 "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
10541 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10544 // commitment tx with five outputs untrimmed (maximum feerate)
10545 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10546 chan.context.feerate_per_kw = 2194;
10548 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
10549 "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
10550 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10553 "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
10554 "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
10555 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10558 "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
10559 "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
10560 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10563 "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
10564 "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
10565 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10568 // commitment tx with four outputs untrimmed (minimum feerate)
10569 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10570 chan.context.feerate_per_kw = 2195;
10572 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
10573 "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
10574 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10577 "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
10578 "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
10579 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10582 "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
10583 "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
10584 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10587 // anchors: commitment tx with four outputs untrimmed (minimum dust limit)
10588 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10589 chan.context.feerate_per_kw = 2185;
10590 chan.context.holder_dust_limit_satoshis = 2001;
10591 let cached_channel_type = chan.context.channel_type;
10592 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10594 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
10595 "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
10596 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10599 "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
10600 "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
10601 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
10604 "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
10605 "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
10606 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
10609 // commitment tx with four outputs untrimmed (maximum feerate)
10610 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10611 chan.context.feerate_per_kw = 3702;
10612 chan.context.holder_dust_limit_satoshis = 546;
10613 chan.context.channel_type = cached_channel_type.clone();
10615 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
10616 "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
10617 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10620 "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
10621 "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
10622 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10625 "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
10626 "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
10627 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10630 // commitment tx with three outputs untrimmed (minimum feerate)
10631 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10632 chan.context.feerate_per_kw = 3703;
10634 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
10635 "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
10636 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10639 "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
10640 "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
10641 "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10644 // anchors: commitment tx with three outputs untrimmed (minimum dust limit)
10645 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10646 chan.context.feerate_per_kw = 3687;
10647 chan.context.holder_dust_limit_satoshis = 3001;
10648 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10650 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
10651 "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
10652 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10655 "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
10656 "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
10657 "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
10660 // commitment tx with three outputs untrimmed (maximum feerate)
10661 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10662 chan.context.feerate_per_kw = 4914;
10663 chan.context.holder_dust_limit_satoshis = 546;
10664 chan.context.channel_type = cached_channel_type.clone();
10666 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
10667 "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
10668 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10671 "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
10672 "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
10673 "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10676 // commitment tx with two outputs untrimmed (minimum feerate)
10677 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10678 chan.context.feerate_per_kw = 4915;
10679 chan.context.holder_dust_limit_satoshis = 546;
10681 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
10682 "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
10683 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10685 // anchors: commitment tx with two outputs untrimmed (minimum dust limit)
10686 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10687 chan.context.feerate_per_kw = 4894;
10688 chan.context.holder_dust_limit_satoshis = 4001;
10689 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10691 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
10692 "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
10693 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10695 // commitment tx with two outputs untrimmed (maximum feerate)
10696 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10697 chan.context.feerate_per_kw = 9651180;
10698 chan.context.holder_dust_limit_satoshis = 546;
10699 chan.context.channel_type = cached_channel_type.clone();
10701 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
10702 "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
10703 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10705 // commitment tx with one output untrimmed (minimum feerate)
10706 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10707 chan.context.feerate_per_kw = 9651181;
10709 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
10710 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
10711 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10713 // anchors: commitment tx with one output untrimmed (minimum dust limit)
10714 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10715 chan.context.feerate_per_kw = 6216010;
10716 chan.context.holder_dust_limit_satoshis = 4001;
10717 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10719 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
10720 "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
10721 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10723 // commitment tx with fee greater than funder amount
10724 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10725 chan.context.feerate_per_kw = 9651936;
10726 chan.context.holder_dust_limit_satoshis = 546;
10727 chan.context.channel_type = cached_channel_type;
10729 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
10730 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
10731 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10733 // commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
10734 chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
10735 chan.context.feerate_per_kw = 253;
10736 chan.context.pending_inbound_htlcs.clear();
10737 chan.context.pending_inbound_htlcs.push({
10738 let mut out = InboundHTLCOutput{
10740 amount_msat: 2000000,
10742 payment_hash: PaymentHash([0; 32]),
10743 state: InboundHTLCState::Committed,
10745 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
10748 chan.context.pending_outbound_htlcs.clear();
10749 chan.context.pending_outbound_htlcs.push({
10750 let mut out = OutboundHTLCOutput{
10752 amount_msat: 5000001,
10754 payment_hash: PaymentHash([0; 32]),
10755 state: OutboundHTLCState::Committed,
10756 source: HTLCSource::dummy(),
10757 skimmed_fee_msat: None,
10758 blinding_point: None,
10760 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
10763 chan.context.pending_outbound_htlcs.push({
10764 let mut out = OutboundHTLCOutput{
10766 amount_msat: 5000000,
10768 payment_hash: PaymentHash([0; 32]),
10769 state: OutboundHTLCState::Committed,
10770 source: HTLCSource::dummy(),
10771 skimmed_fee_msat: None,
10772 blinding_point: None,
10774 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
10778 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
10779 "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
10780 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10783 "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
10784 "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
10785 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
10787 "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
10788 "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
10789 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
10791 "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
10792 "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
10793 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
10796 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10797 test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
10798 "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
10799 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10802 "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
10803 "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
10804 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
10806 "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
10807 "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
10808 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
10810 "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
10811 "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
10812 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
10817 fn test_per_commitment_secret_gen() {
10818 // Test vectors from BOLT 3 Appendix D:
// Each case derives the per-commitment secret for a given (seed, index) pair via
// chan_utils::build_commitment_secret and checks it against the fixed expected
// value from the spec. Index 281474976710655 is 2^48 - 1, the first (highest)
// commitment number in BOLT 3's reversed counting scheme.
//
// Case: "generate_from_seed 0 final node" — all-zero seed, maximum index.
10820 let mut seed = [0; 32];
10821 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
10822 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
10823 <Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);
// Case: "generate_from_seed FF final node" — all-ones seed, maximum index.
10825 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
10826 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
10827 <Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);
// Case: "generate_from_seed FF alternate bits 1" — same all-ones seed, index
// 0xaaaaaaaaaaa exercises alternating bit flips in the derivation tree.
10829 assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
10830 <Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);
// Case: "generate_from_seed FF alternate bits 2" — index 0x555555555555, the
// complementary alternating-bit pattern.
10832 assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
10833 <Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);
// Case: "generate_from_seed 01 last nontrivial node" — 0x01-repeated seed,
// index 1 (only the lowest bit set).
10835 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
10836 assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
10837 <Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
10841 fn test_key_derivation() {
10842 // Test vectors from BOLT 3 Appendix E:
// Verifies localpubkey/revocationpubkey derivation (and the matching private
// keys) against the spec's fixed base_secret / per_commitment_secret inputs.
10843 let secp_ctx = Secp256k1::new();
// Fixed inputs from the spec: base_secret is the byte ramp 0x00..0x1f,
// per_commitment_secret is the same ramp reversed.
10845 let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
10846 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
// Sanity-check the public points corresponding to the two secrets before
// exercising the combined derivations below.
10848 let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
10849 assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);
10851 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
10852 assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
// localprivkey: tweak the base secret with the per-commitment point.
10854 assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
10855 SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());
// revocationpubkey: blinded combination of the revocation basepoint and the
// per-commitment point.
10857 assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
10858 <Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);
// revocationprivkey: the private counterpart, derivable only by a party
// knowing both the per-commitment secret and the base secret.
10860 assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
10861 SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
// Verifies that an inbound channel open whose `channel_type` requires
// zero-conf is accepted by `InboundV1Channel::new` even when the handler is
// not explicitly told the channel is 0conf (`is_0conf = false`).
10865 fn test_zero_conf_channel_type_support() {
10866 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
10867 let secp_ctx = Secp256k1::new();
10868 let seed = [42; 32];
10869 let network = Network::Testnet;
10870 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
10871 let logger = test_utils::TestLogger::new();
// Node A opens an outbound channel towards node B with default config.
10873 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
10874 let config = UserConfig::default();
10875 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
10876 node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
// Build a channel_type of static_remote_key + zero-conf required.
10878 let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
10879 channel_type_features.set_zero_conf_required();
// Override the open_channel message's channel_type with the zero-conf one
// before handing it to the inbound side.
10881 let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
10882 open_channel_msg.common_fields.channel_type = Some(channel_type_features);
10883 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
// The inbound side must accept the zero-conf channel_type.
10884 let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
10885 node_b_node_id, &channelmanager::provided_channel_type_features(&config),
10886 &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
10887 assert!(res.is_ok());
// Verifies `anchors_zero_fee_htlc_tx` channel-type negotiation: it is only
// selected when BOTH sides have it enabled, and then both ends settle on the
// same final `channel_type` (static_remote_key + anchors_zero_fee_htlc_tx).
10891 fn test_supports_anchors_zero_htlc_tx_fee() {
10892 // Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
10893 // resulting `channel_type`.
10894 let secp_ctx = Secp256k1::new();
10895 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
10896 let network = Network::Testnet;
10897 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
10898 let logger = test_utils::TestLogger::new();
10900 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
10901 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
// Only this local config opts into anchors-zero-fee-htlc-tx.
10903 let mut config = UserConfig::default();
10904 config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
10906 // It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both
10907 // need to signal it.
// Counterparty features come from `UserConfig::default()` (no anchors), so
// the negotiated type must NOT include anchors.
10908 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
10909 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
10910 &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
10911 &config, 0, 42, None
10913 assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());
// Expected negotiated type when both sides do signal anchors support.
10915 let mut expected_channel_type = ChannelTypeFeatures::empty();
10916 expected_channel_type.set_static_remote_key_required();
10917 expected_channel_type.set_anchors_zero_fee_htlc_tx_required();
// Now both sides use the anchors-enabled `config` for their init features.
10919 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
10920 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
10921 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
10925 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
10926 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
10927 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
10928 &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
10929 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
// Both ends must agree on the anchors channel type.
10932 assert_eq!(channel_a.context.channel_type, expected_channel_type);
10933 assert_eq!(channel_b.context.channel_type, expected_channel_type);
// Verifies that when no explicit `channel_type` is present in `open_channel`
// (forcing implicit negotiation from the `InitFeatures` intersection), a
// counterparty signaling the legacy `option_anchors` feature is rejected,
// since LDK does not support non-zero-fee-HTLC anchors.
10937 fn test_rejects_implicit_simple_anchors() {
10938 // Tests that if `option_anchors` is being negotiated implicitly through the intersection of
10939 // each side's `InitFeatures`, it is rejected.
10940 let secp_ctx = Secp256k1::new();
10941 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
10942 let network = Network::Testnet;
10943 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
10944 let logger = test_utils::TestLogger::new();
10946 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
10947 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
10949 let config = UserConfig::default();
10951 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
// Hand-construct InitFeatures with `option_static_remotekey` (bit 12) and
// legacy `option_anchors` (bit 20) set as required.
10952 let static_remote_key_required: u64 = 1 << 12;
10953 let simple_anchors_required: u64 = 1 << 20;
10954 let raw_init_features = static_remote_key_required | simple_anchors_required;
10955 let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
10957 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
10958 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
10959 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
10963 // Set `channel_type` to `None` to force the implicit feature negotiation.
10964 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
10965 open_channel_msg.common_fields.channel_type = None;
10967 // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
10968 // `static_remote_key`, it will fail the channel.
10969 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
10970 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
10971 &channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
10972 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
10974 assert!(channel_b.is_err());
// Verifies that the legacy `option_anchors` (non-zero-fee-HTLC) channel type
// is rejected in both directions of explicit `channel_type` negotiation:
// (1) an inbound `open_channel` requesting it fails, and (2) an outbound
// channel whose counterparty downgrades the type to it in `accept_channel`
// also fails.
10978 fn test_rejects_simple_anchors_channel_type() {
10979 // Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
10981 let secp_ctx = Secp256k1::new();
10982 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
10983 let network = Network::Testnet;
10984 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
10985 let logger = test_utils::TestLogger::new();
10987 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
10988 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
10990 let config = UserConfig::default();
10992 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
// Build InitFeatures and ChannelTypeFeatures carrying legacy
// `option_anchors` (bit 20) plus `option_static_remotekey` (bit 12).
10993 let static_remote_key_required: u64 = 1 << 12;
10994 let simple_anchors_required: u64 = 1 << 20;
10995 let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
10996 let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
10997 let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
// These bits are known to LDK — the rejection below is a policy decision,
// not an unknown-feature failure.
10998 assert!(!simple_anchors_init.requires_unknown_bits());
10999 assert!(!simple_anchors_channel_type.requires_unknown_bits());
11001 // First, we'll try to open a channel between A and B where A requests a channel type for
11002 // the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
11003 // B as it's not supported by LDK.
11004 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
11005 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
11006 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
11010 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
11011 open_channel_msg.common_fields.channel_type = Some(simple_anchors_channel_type.clone());
11013 let res = InboundV1Channel::<&TestKeysInterface>::new(
11014 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
11015 &channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
11016 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
11018 assert!(res.is_err());
11020 // Then, we'll try to open another channel where A requests a channel type for
11021 // `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
11022 // original `option_anchors` feature, which should be rejected by A as it's not supported by
11024 let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
11025 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
11026 10000000, 100000, 42, &config, 0, 42, None
11029 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
11031 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
11032 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
11033 &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
11034 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
// Simulate the malicious downgrade by rewriting B's accept_channel
// channel_type before A processes it.
11037 let mut accept_channel_msg = channel_b.get_accept_channel_message();
11038 accept_channel_msg.common_fields.channel_type = Some(simple_anchors_channel_type.clone());
11040 let res = channel_a.accept_channel(
11041 &accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
11043 assert!(res.is_err());
// Verifies batch-funding gating: a channel funded as part of a batch funding
// transaction must not send `channel_ready` or broadcast the funding tx —
// even with `trust_own_funding_0conf` set — until `set_batch_ready()` clears
// the WAITING_FOR_BATCH state flag. A 0conf `channel_ready` from the remote
// may still be received while waiting.
11047 fn test_waiting_for_batch() {
11048 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
11049 let logger = test_utils::TestLogger::new();
11050 let secp_ctx = Secp256k1::new();
11051 let seed = [42; 32];
11052 let network = Network::Testnet;
11053 let best_block = BestBlock::from_network(network);
11054 let chain_hash = ChainHash::using_genesis_block(network);
11055 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
11057 let mut config = UserConfig::default();
11058 // Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
11059 // channel in a batch before all channels are ready.
11060 config.channel_handshake_limits.trust_own_funding_0conf = true;
11062 // Create a channel from node a to node b that will be part of batch funding.
11063 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
11064 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
11069 &channelmanager::provided_init_features(&config),
// Standard open_channel/accept_channel handshake between the two sides.
11079 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
11080 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
11081 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
11086 &channelmanager::provided_channel_type_features(&config),
11087 &channelmanager::provided_init_features(&config),
11093 true, // Allow node b to send a 0conf channel_ready.
11096 let accept_channel_msg = node_b_chan.accept_inbound_channel();
11097 node_a_chan.accept_channel(
11098 &accept_channel_msg,
11099 &config.channel_handshake_limits,
11100 &channelmanager::provided_init_features(&config),
11103 // Fund the channel with a batch funding transaction.
// Two equal-value outputs: index 0 funds this channel; the second output
// stands in for another channel in the batch.
11104 let output_script = node_a_chan.context.get_funding_redeemscript();
11105 let tx = Transaction {
11106 version: Version::ONE,
11107 lock_time: LockTime::ZERO,
11111 value: Amount::from_sat(10000000), script_pubkey: output_script.clone(),
11114 value: Amount::from_sat(10000000), script_pubkey: Builder::new().into_script(),
11117 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
// funding_created / funding_signed exchange; `true` marks this channel as
// part of a batch.
11118 let funding_created_msg = node_a_chan.get_funding_created(
11119 tx.clone(), funding_outpoint, true, &&logger,
11120 ).map_err(|_| ()).unwrap();
11121 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
11122 &funding_created_msg.unwrap(),
11126 ).map_err(|_| ()).unwrap();
11127 let node_b_updates = node_b_chan.monitor_updating_restored(
11135 // Receive funding_signed, but the channel will be configured to hold sending channel_ready and
11136 // broadcasting the funding transaction until the batch is ready.
11137 let res = node_a_chan.funding_signed(
11138 &funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger,
11140 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
11141 let node_a_updates = node_a_chan.monitor_updating_restored(
11148 // Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
11149 // as the funding transaction depends on all channels in the batch becoming ready.
11150 assert!(node_a_updates.channel_ready.is_none());
11151 assert!(node_a_updates.funding_broadcastable.is_none());
11152 assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
11154 // It is possible to receive a 0conf channel_ready from the remote node.
11155 node_a_chan.channel_ready(
11156 &node_b_updates.channel_ready.unwrap(),
// State now records the remote's channel_ready alongside the batch wait.
11164 node_a_chan.context.channel_state,
11165 ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH | AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)
11168 // Clear the ChannelState::WaitingForBatch only when called by ChannelManager.
11169 node_a_chan.set_batch_ready();
11170 assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
// With the batch flag cleared, our own channel_ready can now be produced.
11171 assert!(node_a_chan.check_get_channel_ready(0).is_some());