1 // This file is Copyright its original authors, visible in version control
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
10 use bitcoin::blockdata::script::{Script,Builder};
11 use bitcoin::blockdata::transaction::{Transaction, EcdsaSighashType};
12 use bitcoin::util::sighash;
13 use bitcoin::consensus::encode;
15 use bitcoin::hashes::Hash;
16 use bitcoin::hashes::sha256::Hash as Sha256;
17 use bitcoin::hashes::sha256d::Hash as Sha256d;
18 use bitcoin::hash_types::{Txid, BlockHash};
20 use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
21 use bitcoin::secp256k1::{PublicKey,SecretKey};
22 use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
23 use bitcoin::secp256k1;
25 use crate::ln::{PaymentPreimage, PaymentHash};
26 use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
28 use crate::ln::msgs::DecodeError;
29 use crate::ln::script::{self, ShutdownScript};
30 use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT};
31 use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
32 use crate::ln::chan_utils;
33 use crate::ln::onion_utils::HTLCFailReason;
34 use crate::chain::BestBlock;
35 use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
36 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
37 use crate::chain::transaction::{OutPoint, TransactionData};
38 use crate::sign::{WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
39 use crate::events::ClosureReason;
40 use crate::routing::gossip::NodeId;
41 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
42 use crate::util::logger::Logger;
43 use crate::util::errors::APIError;
44 use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits};
45 use crate::util::scid_utils::scid_from_parts;
48 use crate::prelude::*;
49 use core::{cmp,mem,fmt};
51 #[cfg(any(test, fuzzing, debug_assertions))]
52 use crate::sync::Mutex;
53 use bitcoin::hashes::hex::ToHex;
56 pub struct ChannelValueStat {
57 pub value_to_self_msat: u64,
58 pub channel_value_msat: u64,
59 pub channel_reserve_msat: u64,
60 pub pending_outbound_htlcs_amount_msat: u64,
61 pub pending_inbound_htlcs_amount_msat: u64,
62 pub holding_cell_outbound_amount_msat: u64,
63 pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
64 pub counterparty_dust_limit_msat: u64,
67 pub struct AvailableBalances {
68 /// The amount that would go to us if we close the channel, ignoring any on-chain fees.
69 pub balance_msat: u64,
70 /// Total amount available for our counterparty to send to us.
71 pub inbound_capacity_msat: u64,
72 /// Total amount available for us to send to our counterparty.
73 pub outbound_capacity_msat: u64,
74 /// The maximum value we can assign to the next outbound HTLC
75 pub next_outbound_htlc_limit_msat: u64,
76 /// The minimum value we can assign to the next outbound HTLC
77 pub next_outbound_htlc_minimum_msat: u64,
80 #[derive(Debug, Clone, Copy, PartialEq)]
82 // Inbound states mirroring InboundHTLCState
84 AwaitingRemoteRevokeToAnnounce,
85 // Note that we do not have an AwaitingAnnouncedRemoteRevoke variant here as it is universally
86 // handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
87 // distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
88 // the fee update anywhere, we can simply consider the fee update `Committed` immediately
89 // instead of setting it to AwaitingAnnouncedRemoteRevoke.
91 // Outbound state can only be `LocalAnnounced` or `Committed`
95 enum InboundHTLCRemovalReason {
96 FailRelay(msgs::OnionErrorPacket),
97 FailMalformed(([u8; 32], u16)),
98 Fulfill(PaymentPreimage),
101 enum InboundHTLCState {
102 /// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
103 /// update_add_htlc message for this HTLC.
104 RemoteAnnounced(PendingHTLCStatus),
105 /// Included in a received commitment_signed message (implying we've
106 /// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
107 /// state (see the example below). We have not yet included this HTLC in a
108 /// commitment_signed message because we are waiting on the remote's
109 /// aforementioned state revocation. One reason this missing remote RAA
110 /// (revoke_and_ack) blocks us from constructing a commitment_signed message
111 /// is because every time we create a new "state", i.e. every time we sign a
112 /// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
113 /// which are provided one-at-a-time in each RAA. E.g., the last RAA they
114 /// sent provided the per_commitment_point for our current commitment tx.
115 /// The other reason we should not send a commitment_signed without their RAA
116 /// is because their RAA serves to ACK our previous commitment_signed.
118 /// Here's an example of how an HTLC could come to be in this state:
119 /// remote --> update_add_htlc(prev_htlc) --> local
120 /// remote --> commitment_signed(prev_htlc) --> local
121 /// remote <-- revoke_and_ack <-- local
122 /// remote <-- commitment_signed(prev_htlc) <-- local
123 /// [note that here, the remote does not respond with a RAA]
124 /// remote --> update_add_htlc(this_htlc) --> local
125 /// remote --> commitment_signed(prev_htlc, this_htlc) --> local
126 /// Now `this_htlc` will be assigned this state. It's unable to be officially
127 /// accepted, i.e. included in a commitment_signed, because we're missing the
128 /// RAA that provides our next per_commitment_point. The per_commitment_point
129 /// is used to derive commitment keys, which are used to construct the
130 /// signatures in a commitment_signed message.
131 /// Implies AwaitingRemoteRevoke.
133 /// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
134 AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
135 /// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
136 /// We have also included this HTLC in our latest commitment_signed and are now just waiting
137 /// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
138 /// channel (before it can then get forwarded and/or removed).
139 /// Implies AwaitingRemoteRevoke.
140 AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
142 /// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
143 /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
145 /// Note that we have to keep an eye on the HTLC until we've received a broadcastable
146 /// commitment transaction without it as otherwise we'll have to force-close the channel to
147 /// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
148 /// anyway). That said, ChannelMonitor does this for us (see
149 /// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
150 /// our own local state before then, once we're sure that the next commitment_signed and
151 /// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
152 LocalRemoved(InboundHTLCRemovalReason),
155 struct InboundHTLCOutput {
159 payment_hash: PaymentHash,
160 state: InboundHTLCState,
163 enum OutboundHTLCState {
164 /// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
165 /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
166 /// we will promote to Committed (note that they may not accept it until the next time we
167 /// revoke, but we don't really care about that:
168 /// * they've revoked, so worst case we can announce an old state and get our (option on)
169 /// money back (though we won't), and,
170 /// * we'll send them a revoke when they send a commitment_signed, and since only they're
171 /// allowed to remove it, the "can only be removed once committed on both sides" requirement
172 /// doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
173 /// we'll never get out of sync).
174 /// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
175 /// OutboundHTLCOutput's size just for a temporary bit
176 LocalAnnounced(Box<msgs::OnionPacket>),
178 /// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
179 /// the change (though they'll need to revoke before we fail the payment).
180 RemoteRemoved(OutboundHTLCOutcome),
181 /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
182 /// the remote side hasn't yet revoked their previous state, which we need them to do before we
183 /// can do any backwards failing. Implies AwaitingRemoteRevoke.
184 /// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
185 /// remote revoke_and_ack on a previous state before we can do so.
186 AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
187 /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
188 /// the remote side hasn't yet revoked their previous state, which we need them to do before we
189 /// can do any backwards failing. Implies AwaitingRemoteRevoke.
190 /// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
191 /// revoke_and_ack to drop completely.
192 AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
196 enum OutboundHTLCOutcome {
197 /// LDK version 0.0.105+ will always fill in the preimage here.
198 Success(Option<PaymentPreimage>),
199 Failure(HTLCFailReason),
202 impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
203 fn from(o: Option<HTLCFailReason>) -> Self {
205 None => OutboundHTLCOutcome::Success(None),
206 Some(r) => OutboundHTLCOutcome::Failure(r)
211 impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
212 fn into(self) -> Option<&'a HTLCFailReason> {
214 OutboundHTLCOutcome::Success(_) => None,
215 OutboundHTLCOutcome::Failure(ref r) => Some(r)
220 struct OutboundHTLCOutput {
224 payment_hash: PaymentHash,
225 state: OutboundHTLCState,
229 /// See AwaitingRemoteRevoke ChannelState for more info
230 enum HTLCUpdateAwaitingACK {
231 AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
235 payment_hash: PaymentHash,
237 onion_routing_packet: msgs::OnionPacket,
240 payment_preimage: PaymentPreimage,
245 err_packet: msgs::OnionErrorPacket,
249 /// There are a few "states" and then a number of flags which can be applied:
250 /// We first move through init with OurInitSent -> TheirInitSent -> FundingCreated -> FundingSent.
251 /// TheirChannelReady and OurChannelReady then get set on FundingSent, and when both are set we
252 /// move on to ChannelReady.
253 /// Note that PeerDisconnected can be set on both ChannelReady and FundingSent.
254 /// ChannelReady can then get all remaining flags set on it, until we finish shutdown, then we
255 /// move on to ShutdownComplete, at which point most calls into this channel are disallowed.
257 /// Implies we have (or are prepared to) send our open_channel/accept_channel message
258 OurInitSent = 1 << 0,
259 /// Implies we have received their open_channel/accept_channel message
260 TheirInitSent = 1 << 1,
261 /// We have sent funding_created and are awaiting a funding_signed to advance to FundingSent.
262 /// Note that this is nonsense for an inbound channel as we immediately generate funding_signed
263 /// upon receipt of funding_created, so simply skip this state.
265 /// Set when we have received/sent funding_created and funding_signed and are thus now waiting
266 /// on the funding transaction to confirm. The ChannelReady flags are set to indicate when we
267 /// and our counterparty consider the funding transaction confirmed.
269 /// Flag which can be set on FundingSent to indicate they sent us a channel_ready message.
270 /// Once both TheirChannelReady and OurChannelReady are set, state moves on to ChannelReady.
271 TheirChannelReady = 1 << 4,
272 /// Flag which can be set on FundingSent to indicate we sent them a channel_ready message.
273 /// Once both TheirChannelReady and OurChannelReady are set, state moves on to ChannelReady.
274 OurChannelReady = 1 << 5,
276 /// Flag which is set on ChannelReady and FundingSent indicating remote side is considered
277 /// "disconnected" and no updates are allowed until after we've done a channel_reestablish
279 PeerDisconnected = 1 << 7,
280 /// Flag which is set on ChannelReady, FundingCreated, and FundingSent indicating the user has
281 /// told us a ChannelMonitor update is pending async persistence somewhere and we should pause
282 /// sending any outbound messages until they've managed to finish.
283 MonitorUpdateInProgress = 1 << 8,
284 /// Flag which implies that we have sent a commitment_signed but are awaiting the responding
285 /// revoke_and_ack message. During this time period, we can't generate new commitment_signed
286 /// messages as then we will be unable to determine which HTLCs they included in their
287 /// revoke_and_ack implicit ACK, so instead we have to hold them away temporarily to be sent
289 /// Flag is set on ChannelReady.
290 AwaitingRemoteRevoke = 1 << 9,
291 /// Flag which is set on ChannelReady or FundingSent after receiving a shutdown message from
292 /// the remote end. If set, they may not add any new HTLCs to the channel, and we are expected
293 /// to respond with our own shutdown message when possible.
294 RemoteShutdownSent = 1 << 10,
295 /// Flag which is set on ChannelReady or FundingSent after sending a shutdown message. At this
296 /// point, we may not add any new HTLCs to the channel.
297 LocalShutdownSent = 1 << 11,
298 /// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about
299 /// to drop us, but we store this anyway.
300 ShutdownComplete = 4096,
// Mask covering the two shutdown flags: set bits indicate the local and/or remote side has
// sent a `shutdown` message.
302 const BOTH_SIDES_SHUTDOWN_MASK: u32 = ChannelState::LocalShutdownSent as u32 | ChannelState::RemoteShutdownSent as u32;
// Mask of the flag bits which may be set on top of more than one base channel state: the two
// shutdown flags plus `PeerDisconnected` and `MonitorUpdateInProgress`.
303 const MULTI_STATE_FLAGS: u32 = BOTH_SIDES_SHUTDOWN_MASK | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32;
/// The first commitment transaction number, 2^48 - 1. Commitment numbers count *down* from
/// here (see the commitment-number note on the `Channel` fields below).
305 pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
307 /// The "channel disabled" bit in channel_update must be set based on whether we are connected to
308 /// our counterparty or not. However, we don't want to announce updates right away to avoid
309 /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
310 /// our channel_update message and track the current state here.
311 /// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
312 #[derive(Clone, Copy, PartialEq)]
313 pub(super) enum ChannelUpdateStatus {
314 /// We've announced the channel as enabled and are connected to our peer.
316 /// Our channel is no longer live, but we haven't announced the channel as disabled yet.
318 /// Our channel is live again, but we haven't announced the channel as enabled yet.
320 /// We've announced the channel as disabled.
324 /// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
326 pub enum AnnouncementSigsState {
327 /// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
328 /// we sent the last `AnnouncementSignatures`.
330 /// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
331 /// This state never appears on disk - instead we write `NotSent`.
333 /// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
334 /// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
335 /// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
336 /// they send back a `RevokeAndACK`.
337 /// This state never appears on disk - instead we write `NotSent`.
339 /// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
340 /// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
344 /// An enum indicating whether the local or remote side offered a given HTLC.
350 /// An enum gathering stats on pending HTLCs, either inbound or outbound side.
353 pending_htlcs_value_msat: u64,
354 on_counterparty_tx_dust_exposure_msat: u64,
355 on_holder_tx_dust_exposure_msat: u64,
356 holding_cell_msat: u64,
357 on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
360 /// An enum gathering stats on commitment transaction, either local or remote.
361 struct CommitmentStats<'a> {
362 tx: CommitmentTransaction, // the transaction info
363 feerate_per_kw: u32, // the feerate included to build the transaction
364 total_fee_sat: u64, // the total fee included in the transaction
365 num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
366 htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
367 local_balance_msat: u64, // local balance before fees but considering dust limits
368 remote_balance_msat: u64, // remote balance before fees but considering dust limits
369 preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
372 /// Used when calculating whether we or the remote can afford an additional HTLC.
373 struct HTLCCandidate {
375 origin: HTLCInitiator,
379 fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
387 /// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
389 enum UpdateFulfillFetch {
391 monitor_update: ChannelMonitorUpdate,
392 htlc_value_msat: u64,
393 msg: Option<msgs::UpdateFulfillHTLC>,
398 /// The return type of get_update_fulfill_htlc_and_commit.
399 pub enum UpdateFulfillCommitFetch<'a> {
400 /// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
401 /// it in the holding cell, or re-generated the update_fulfill message after the same claim was
402 /// previously placed in the holding cell (and has since been removed).
404 /// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
405 monitor_update: &'a ChannelMonitorUpdate,
406 /// The value of the HTLC which was claimed, in msat.
407 htlc_value_msat: u64,
409 /// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
410 /// or has been forgotten (presumably previously claimed).
414 /// The return value of `monitor_updating_restored`
415 pub(super) struct MonitorRestoreUpdates {
416 pub raa: Option<msgs::RevokeAndACK>,
417 pub commitment_update: Option<msgs::CommitmentUpdate>,
418 pub order: RAACommitmentOrder,
419 pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
420 pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
421 pub finalized_claimed_htlcs: Vec<HTLCSource>,
422 pub funding_broadcastable: Option<Transaction>,
423 pub channel_ready: Option<msgs::ChannelReady>,
424 pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
427 /// The return value of `channel_reestablish`
428 pub(super) struct ReestablishResponses {
429 pub channel_ready: Option<msgs::ChannelReady>,
430 pub raa: Option<msgs::RevokeAndACK>,
431 pub commitment_update: Option<msgs::CommitmentUpdate>,
432 pub order: RAACommitmentOrder,
433 pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
434 pub shutdown_msg: Option<msgs::Shutdown>,
437 /// The return type of `force_shutdown`
438 pub(crate) type ShutdownResult = (
439 Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
440 Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>
443 /// If the majority of the channel's funds are to the fundee and the initiator holds only just
444 /// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
445 /// initiator controls the feerate, if they then go to increase the channel fee, they may have no
446 /// balance but the fundee is unable to send a payment as the increase in fee more than drains
447 /// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
448 /// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
449 /// by this multiple without hitting this case, before sending.
450 /// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
451 /// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
452 /// HTLCs for days we may need this to suffice for feerate increases across days, but that may
453 /// leave the channel less usable as we hold a bigger reserve.
// Exposed as `pub` under cfg(test)/cfg(fuzzing) — presumably so tests and fuzzers can reference
// it — and crate-private otherwise. Both cfg arms must keep the same value.
454 #[cfg(any(fuzzing, test))]
455 pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
456 #[cfg(not(any(fuzzing, test)))]
457 const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
459 /// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
460 /// channel creation on an inbound channel, we simply force-close and move on.
461 /// This constant is the one suggested in BOLT 2.
/// (2016 blocks is roughly two weeks at an average of one block per ten minutes.)
462 pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;
464 /// In case of a concurrent update_add_htlc proposed by our counterparty, we might
465 /// not have enough balance value remaining to cover the onchain cost of this new
466 /// HTLC weight. If this happens, our counterparty fails the reception of our
467 /// commitment_signed including this new HTLC due to infringement on the channel
// NOTE(review): the sentence above ends mid-thought ("...infringement on the channel") —
// presumably "channel reserve"; the doc text appears truncated, confirm against upstream.
469 /// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
470 /// size 2. However, if the number of concurrent update_add_htlc is higher, this still
471 /// leads to a channel force-close. Ultimately, this is an issue coming from the
472 /// design of LN state machines, allowing asynchronous updates.
473 pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
475 /// When a channel is opened, we check that the funding amount is enough to pay for relevant
476 /// commitment transaction fees, with at least this many HTLCs present on the commitment
477 /// transaction (not counting the value of the HTLCs themselves).
/// In other words, at open time the funding must cover the commitment-transaction fee assuming
/// 4 HTLC outputs are present.
478 pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
480 /// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
481 /// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
482 /// ChannelUpdate prompted by the config update. This value was determined as follows:
484 ///   * The expected interval between ticks (1 minute).
485 ///   * The average convergence delay of updates across the network, i.e., ~300 seconds on average
486 ///    for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
487 /// * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
// i.e. ~300s convergence delay / 60s tick interval = 5 ticks.
488 pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
490 /// The number of ticks that may elapse while we're waiting for a response to a
491 /// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
// NOTE(review): the sentence above appears truncated ("...attempt to disconnect") — presumably
// "disconnect the peer"; confirm full wording against upstream.
494 /// See [`Channel::sent_message_awaiting_response`] for more information.
495 pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
497 struct PendingChannelMonitorUpdate {
498 update: ChannelMonitorUpdate,
499 /// In some cases we need to delay letting the [`ChannelMonitorUpdate`] go until after an
500 /// `Event` is processed by the user. This bool indicates the [`ChannelMonitorUpdate`] is
501 /// blocked on some external event and the [`ChannelManager`] will update us when we're ready.
503 /// [`ChannelManager`]: super::channelmanager::ChannelManager
507 impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
508 (0, update, required),
509 (2, blocked, required),
512 // TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
513 // has been completed, and then turn into a Channel to get compiler-time enforcement of things like
514 // calling channel_id() before we're set up or things like get_outbound_funding_signed on an
517 // Holder designates channel data owned for the benefit of the user client.
518 // Counterparty designates channel data owned by another channel participant entity.
519 pub(super) struct Channel<Signer: ChannelSigner> {
520 config: LegacyChannelConfig,
522 // Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
523 // constructed using it. The second element in the tuple corresponds to the number of ticks that
524 // have elapsed since the update occurred.
525 prev_config: Option<(ChannelConfig, usize)>,
527 inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,
531 channel_id: [u8; 32],
532 temporary_channel_id: Option<[u8; 32]>, // Will be `None` for channels created prior to 0.0.115.
535 // When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
536 // our peer. However, we want to make sure they received it, or else rebroadcast it when we
538 // We do so here, see `AnnouncementSigsSent` for more details on the state(s).
539 // Note that a number of our tests were written prior to the behavior here which retransmits
540 // AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
542 #[cfg(any(test, feature = "_test_utils"))]
543 pub(crate) announcement_sigs_state: AnnouncementSigsState,
544 #[cfg(not(any(test, feature = "_test_utils")))]
545 announcement_sigs_state: AnnouncementSigsState,
547 secp_ctx: Secp256k1<secp256k1::All>,
548 channel_value_satoshis: u64,
550 latest_monitor_update_id: u64,
552 holder_signer: Signer,
553 shutdown_scriptpubkey: Option<ShutdownScript>,
554 destination_script: Script,
556 // Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
557 // generation start at 0 and count up...this simplifies some parts of implementation at the
558 // cost of others, but should really just be changed.
560 cur_holder_commitment_transaction_number: u64,
561 cur_counterparty_commitment_transaction_number: u64,
562 value_to_self_msat: u64, // Excluding all pending_htlcs, excluding fees
563 pending_inbound_htlcs: Vec<InboundHTLCOutput>,
564 pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
565 holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
567 /// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
568 /// need to ensure we resend them in the order we originally generated them. Note that because
569 /// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
570 /// sufficient to simply set this to the opposite of any message we are generating as we
571 /// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
572 /// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
574 resend_order: RAACommitmentOrder,
576 monitor_pending_channel_ready: bool,
577 monitor_pending_revoke_and_ack: bool,
578 monitor_pending_commitment_signed: bool,
580 // TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
581 // responsible for some of the HTLCs here or not - we don't know whether the update in question
582 // completed or not. We currently ignore these fields entirely when force-closing a channel,
583 // but need to handle this somehow or we run the risk of losing HTLCs!
584 monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
585 monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
586 monitor_pending_finalized_fulfills: Vec<HTLCSource>,
588 // pending_update_fee is filled when sending and receiving update_fee.
590 // Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
591 // or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
592 // generating new commitment transactions with exactly the same criteria as inbound/outbound
593 // HTLCs with similar state.
594 pending_update_fee: Option<(u32, FeeUpdateState)>,
595 // If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
596 // it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
597 // `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
598 // `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
599 // further `send_update_fee` calls, dropping the previous holding cell update entirely.
600 holding_cell_update_fee: Option<u32>,
601 next_holder_htlc_id: u64,
602 next_counterparty_htlc_id: u64,
605 /// The timestamp set on our latest `channel_update` message for this channel. It is updated
606 /// when the channel is updated in ways which may impact the `channel_update` message or when a
607 /// new block is received, ensuring it's always at least moderately close to the current real
609 update_time_counter: u32,
611 #[cfg(debug_assertions)]
612 /// Max to_local and to_remote outputs in a locally-generated commitment transaction
613 holder_max_commitment_tx_output: Mutex<(u64, u64)>,
614 #[cfg(debug_assertions)]
615 /// Max to_local and to_remote outputs in a remote-generated commitment transaction
616 counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,
618 last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
619 target_closing_feerate_sats_per_kw: Option<u32>,
621 /// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
622 /// update, we need to delay processing it until later. We do that here by simply storing the
623 /// closing_signed message and handling it in `maybe_propose_closing_signed`.
624 pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,
626 /// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
627 /// transaction. These are set once we reach `closing_negotiation_ready`.
629 pub(crate) closing_fee_limits: Option<(u64, u64)>,
631 closing_fee_limits: Option<(u64, u64)>,
633 /// Flag that ensures that `accept_inbound_channel` must be called before `funding_created`
634 /// is executed successfully. The reason for this flag is that when the
635 /// `UserConfig::manually_accept_inbound_channels` config flag is set to true, inbound channels
636 /// are required to be manually accepted by the node operator before the `msgs::AcceptChannel`
637 /// message is created and sent out. During the manual accept process, `accept_inbound_channel`
638 /// is called by `ChannelManager::accept_inbound_channel`.
640 /// The flag counteracts that a counterparty node could theoretically send a
641 /// `msgs::FundingCreated` message before the node operator has manually accepted an inbound
642 /// channel request made by the counterparty node. That would execute `funding_created` before
643 /// `accept_inbound_channel`, and `funding_created` should therefore not execute successfully.
644 inbound_awaiting_accept: bool,
646 /// The hash of the block in which the funding transaction was included.
647 funding_tx_confirmed_in: Option<BlockHash>,
648 funding_tx_confirmation_height: u32,
649 short_channel_id: Option<u64>,
650 /// Either the height at which this channel was created or the height at which it was last
651 /// serialized if it was serialized by versions prior to 0.0.103.
652 /// We use this to close if funding is never broadcasted.
653 channel_creation_height: u32,
655 counterparty_dust_limit_satoshis: u64,
658 pub(super) holder_dust_limit_satoshis: u64,
660 holder_dust_limit_satoshis: u64,
663 pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
665 counterparty_max_htlc_value_in_flight_msat: u64,
668 pub(super) holder_max_htlc_value_in_flight_msat: u64,
670 holder_max_htlc_value_in_flight_msat: u64,
672 /// minimum channel reserve for self to maintain - set by them.
673 counterparty_selected_channel_reserve_satoshis: Option<u64>,
676 pub(super) holder_selected_channel_reserve_satoshis: u64,
678 holder_selected_channel_reserve_satoshis: u64,
680 counterparty_htlc_minimum_msat: u64,
681 holder_htlc_minimum_msat: u64,
683 pub counterparty_max_accepted_htlcs: u16,
685 counterparty_max_accepted_htlcs: u16,
686 holder_max_accepted_htlcs: u16,
687 minimum_depth: Option<u32>,
689 counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
691 pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
692 funding_transaction: Option<Transaction>,
694 counterparty_cur_commitment_point: Option<PublicKey>,
695 counterparty_prev_commitment_point: Option<PublicKey>,
696 counterparty_node_id: PublicKey,
698 counterparty_shutdown_scriptpubkey: Option<Script>,
700 commitment_secrets: CounterpartyCommitmentSecrets,
702 channel_update_status: ChannelUpdateStatus,
703 /// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
704 /// not complete within a single timer tick (one minute), we should force-close the channel.
705 /// This prevents us from keeping unusable channels around forever if our counterparty wishes
707 /// Note that this field is reset to false on deserialization to give us a chance to connect to
708 /// our peer and start the closing_signed negotiation fresh.
709 closing_signed_in_flight: bool,
711 /// Our counterparty's channel_announcement signatures provided in announcement_signatures.
712 /// This can be used to rebroadcast the channel_announcement message later.
713 announcement_sigs: Option<(Signature, Signature)>,
715 // We save these values so we can make sure `next_local_commit_tx_fee_msat` and
716 // `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
// be, by comparing the cached values to the fee of the transaction generated by
718 // `build_commitment_transaction`.
719 #[cfg(any(test, fuzzing))]
720 next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
721 #[cfg(any(test, fuzzing))]
722 next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
724 /// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
725 /// they will not send a channel_reestablish until the channel locks in. Then, they will send a
726 /// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
727 /// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
728 /// message until we receive a channel_reestablish.
730 /// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
731 pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,
733 /// An option set when we wish to track how many ticks have elapsed while waiting for a response
734 /// from our counterparty after sending a message. If the peer has yet to respond after reaching
735 /// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
736 /// unblock the state machine.
738 /// This behavior is mostly motivated by a lnd bug in which we don't receive a message we expect
739 /// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
740 /// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
742 /// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
743 /// [`msgs::RevokeAndACK`] message from the counterparty.
744 sent_message_awaiting_response: Option<usize>,
746 #[cfg(any(test, fuzzing))]
747 // When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
748 // corresponding HTLC on the inbound path. If, then, the outbound path channel is
// disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
750 // messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
751 // is fine, but as a sanity check in our failure to generate the second claim, we check here
752 // that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
753 historical_inbound_htlc_fulfills: HashSet<u64>,
755 /// This channel's type, as negotiated during channel open
756 channel_type: ChannelTypeFeatures,
758 // Our counterparty can offer us SCID aliases which they will map to this channel when routing
759 // outbound payments. These can be used in invoice route hints to avoid explicitly revealing
760 // the channel's funding UTXO.
762 // We also use this when sending our peer a channel_update that isn't to be broadcasted
763 // publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
764 // associated channel mapping.
766 // We only bother storing the most recent SCID alias at any time, though our counterparty has
767 // to store all of them.
768 latest_inbound_scid_alias: Option<u64>,
770 // We always offer our counterparty a static SCID alias, which we recognize as for this channel
771 // if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
772 // don't currently support node id aliases and eventually privacy should be provided with
773 // blinded paths instead of simple scid+node_id aliases.
774 outbound_scid_alias: u64,
776 // We track whether we already emitted a `ChannelPending` event.
777 channel_pending_event_emitted: bool,
779 // We track whether we already emitted a `ChannelReady` event.
780 channel_ready_event_emitted: bool,
782 /// The unique identifier used to re-derive the private key material for the channel through
783 /// [`SignerProvider::derive_channel_signer`].
784 channel_keys_id: [u8; 32],
786 /// When we generate [`ChannelMonitorUpdate`]s to persist, they may not be persisted immediately.
787 /// If we then persist the [`channelmanager::ChannelManager`] and crash before the persistence
788 /// completes we still need to be able to complete the persistence. Thus, we have to keep a
789 /// copy of the [`ChannelMonitorUpdate`] here until it is complete.
790 pending_monitor_updates: Vec<PendingChannelMonitorUpdate>,
793 #[cfg(any(test, fuzzing))]
794 struct CommitmentTxInfoCached {
796 total_pending_htlcs: usize,
797 next_holder_htlc_id: u64,
798 next_counterparty_htlc_id: u64,
802 pub const DEFAULT_MAX_HTLCS: u16 = 50;
804 pub(crate) fn commitment_tx_base_weight(opt_anchors: bool) -> u64 {
805 const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
806 const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
807 if opt_anchors { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
811 const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
813 pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
815 pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
817 /// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
818 /// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
819 /// although LDK 0.0.104+ enabled serialization of channels with a different value set for
820 /// `holder_max_htlc_value_in_flight_msat`.
821 pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;
823 /// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
824 /// `option_support_large_channel` (aka wumbo channels) is not supported.
826 pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;
828 /// Total bitcoin supply in satoshis.
829 pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;
831 /// The maximum network dust limit for standard script formats. This currently represents the
832 /// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
833 /// transaction non-standard and thus refuses to relay it.
834 /// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
835 /// implementations use this value for their dust limit today.
836 pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;
838 /// The maximum channel dust limit we will accept from our counterparty.
839 pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;
841 /// The dust limit is used for both the commitment transaction outputs as well as the closing
842 /// transactions. For cooperative closing transactions, we require segwit outputs, though accept
843 /// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
844 /// In order to avoid having to concern ourselves with standardness during the closing process, we
845 /// simply require our counterparty to use a dust limit which will leave any segwit output
847 /// See <https://github.com/lightning/bolts/issues/905> for more details.
848 pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
850 // Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
851 pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
853 /// Used to return a simple Error back to ChannelManager. Will get converted to a
854 /// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
855 /// channel_id in ChannelManager.
856 pub(super) enum ChannelError {
862 impl fmt::Debug for ChannelError {
863 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
865 &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
866 &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
867 &ChannelError::Close(ref e) => write!(f, "Close : {}", e),
872 macro_rules! secp_check {
873 ($res: expr, $err: expr) => {
876 Err(_) => return Err(ChannelError::Close($err)),
881 impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
882 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
883 /// `channel_value_satoshis` in msat, set through
884 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
886 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
888 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
889 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
890 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
892 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
895 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
897 channel_value_satoshis * 10 * configured_percent
900 /// Returns a minimum channel reserve value the remote needs to maintain,
901 /// required by us according to the configured or default
902 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
904 /// Guaranteed to return a value no larger than channel_value_satoshis
906 /// This is used both for outbound and inbound channels and has lower bound
907 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
908 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
909 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
910 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
913 /// This is for legacy reasons, present for forward-compatibility.
914 /// LDK versions older than 0.0.104 don't know how read/handle values other than default
915 /// from storage. Hence, we use this function to not persist default values of
916 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
917 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
918 let (q, _) = channel_value_satoshis.overflowing_div(100);
919 cmp::min(channel_value_satoshis, cmp::max(q, 1000))
922 pub(crate) fn opt_anchors(&self) -> bool {
923 self.channel_transaction_parameters.opt_anchors.is_some()
926 fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
927 // The default channel type (ie the first one we try) depends on whether the channel is
928 // public - if it is, we just go with `only_static_remotekey` as it's the only option
929 // available. If it's private, we first try `scid_privacy` as it provides better privacy
930 // with no other changes, and fall back to `only_static_remotekey`.
931 let mut ret = ChannelTypeFeatures::only_static_remote_key();
932 if !config.channel_handshake_config.announced_channel &&
933 config.channel_handshake_config.negotiate_scid_privacy &&
934 their_features.supports_scid_privacy() {
935 ret.set_scid_privacy_required();
938 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
939 // set it now. If they don't understand it, we'll fall back to our default of
940 // `only_static_remotekey`.
942 { // Attributes are not allowed on if expressions on our current MSRV of 1.41.
943 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
944 their_features.supports_anchors_zero_fee_htlc_tx() {
945 ret.set_anchors_zero_fee_htlc_tx_required();
952 /// If we receive an error message, it may only be a rejection of the channel type we tried,
953 /// not of our ability to open any channel at all. Thus, on error, we should first call this
954 /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
955 pub(crate) fn maybe_handle_error_without_close(&mut self, chain_hash: BlockHash) -> Result<msgs::OpenChannel, ()> {
956 if !self.is_outbound() || self.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
957 if self.channel_type == ChannelTypeFeatures::only_static_remote_key() {
958 // We've exhausted our options
961 // We support opening a few different types of channels. Try removing our additional
962 // features one by one until we've either arrived at our default or the counterparty has
965 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
966 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
967 // checks whether the counterparty supports every feature, this would only happen if the
968 // counterparty is advertising the feature, but rejecting channels proposing the feature for
970 if self.channel_type.supports_anchors_zero_fee_htlc_tx() {
971 self.channel_type.clear_anchors_zero_fee_htlc_tx();
972 assert!(self.channel_transaction_parameters.opt_non_zero_fee_anchors.is_none());
973 self.channel_transaction_parameters.opt_anchors = None;
974 } else if self.channel_type.supports_scid_privacy() {
975 self.channel_type.clear_scid_privacy();
977 self.channel_type = ChannelTypeFeatures::only_static_remote_key();
979 Ok(self.get_open_channel(chain_hash))
983 pub fn new_outbound<ES: Deref, SP: Deref, F: Deref>(
984 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
985 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
986 outbound_scid_alias: u64
987 ) -> Result<Channel<Signer>, APIError>
988 where ES::Target: EntropySource,
989 SP::Target: SignerProvider<Signer = Signer>,
990 F::Target: FeeEstimator,
992 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
993 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
994 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
995 let pubkeys = holder_signer.pubkeys().clone();
997 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
998 return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
1000 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
1001 return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
1003 let channel_value_msat = channel_value_satoshis * 1000;
1004 if push_msat > channel_value_msat {
1005 return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
1007 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
1008 return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
1010 let holder_selected_channel_reserve_satoshis = Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
1011 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
1012 // Protocol level safety check in place, although it should never happen because
1013 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
1014 return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
1017 let channel_type = Self::get_initial_channel_type(&config, their_features);
1018 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
1020 let feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
1022 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
1023 let commitment_tx_fee = Self::commit_tx_fee_msat(feerate, MIN_AFFORDABLE_HTLC_COUNT, channel_type.requires_anchors_zero_fee_htlc_tx());
1024 if value_to_self_msat < commitment_tx_fee {
1025 return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
1028 let mut secp_ctx = Secp256k1::new();
1029 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
1031 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
1032 match signer_provider.get_shutdown_scriptpubkey() {
1033 Ok(scriptpubkey) => Some(scriptpubkey),
1034 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
1038 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
1039 if !shutdown_scriptpubkey.is_compatible(&their_features) {
1040 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
1044 let destination_script = match signer_provider.get_destination_script() {
1045 Ok(script) => script,
1046 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
1049 let temporary_channel_id = entropy_source.get_secure_random_bytes();
1054 config: LegacyChannelConfig {
1055 options: config.channel_config.clone(),
1056 announced_channel: config.channel_handshake_config.announced_channel,
1057 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
1062 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
1064 channel_id: temporary_channel_id,
1065 temporary_channel_id: Some(temporary_channel_id),
1066 channel_state: ChannelState::OurInitSent as u32,
1067 announcement_sigs_state: AnnouncementSigsState::NotSent,
1069 channel_value_satoshis,
1071 latest_monitor_update_id: 0,
1074 shutdown_scriptpubkey,
1077 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
1078 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
1081 pending_inbound_htlcs: Vec::new(),
1082 pending_outbound_htlcs: Vec::new(),
1083 holding_cell_htlc_updates: Vec::new(),
1084 pending_update_fee: None,
1085 holding_cell_update_fee: None,
1086 next_holder_htlc_id: 0,
1087 next_counterparty_htlc_id: 0,
1088 update_time_counter: 1,
1090 resend_order: RAACommitmentOrder::CommitmentFirst,
1092 monitor_pending_channel_ready: false,
1093 monitor_pending_revoke_and_ack: false,
1094 monitor_pending_commitment_signed: false,
1095 monitor_pending_forwards: Vec::new(),
1096 monitor_pending_failures: Vec::new(),
1097 monitor_pending_finalized_fulfills: Vec::new(),
1099 #[cfg(debug_assertions)]
1100 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
1101 #[cfg(debug_assertions)]
1102 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
1104 last_sent_closing_fee: None,
1105 pending_counterparty_closing_signed: None,
1106 closing_fee_limits: None,
1107 target_closing_feerate_sats_per_kw: None,
1109 inbound_awaiting_accept: false,
1111 funding_tx_confirmed_in: None,
1112 funding_tx_confirmation_height: 0,
1113 short_channel_id: None,
1114 channel_creation_height: current_chain_height,
1116 feerate_per_kw: feerate,
1117 counterparty_dust_limit_satoshis: 0,
1118 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
1119 counterparty_max_htlc_value_in_flight_msat: 0,
1120 holder_max_htlc_value_in_flight_msat: Self::get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
1121 counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
1122 holder_selected_channel_reserve_satoshis,
1123 counterparty_htlc_minimum_msat: 0,
1124 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
1125 counterparty_max_accepted_htlcs: 0,
1126 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
1127 minimum_depth: None, // Filled in in accept_channel
1129 counterparty_forwarding_info: None,
1131 channel_transaction_parameters: ChannelTransactionParameters {
1132 holder_pubkeys: pubkeys,
1133 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
1134 is_outbound_from_holder: true,
1135 counterparty_parameters: None,
1136 funding_outpoint: None,
1137 opt_anchors: if channel_type.requires_anchors_zero_fee_htlc_tx() { Some(()) } else { None },
1138 opt_non_zero_fee_anchors: None
1140 funding_transaction: None,
1142 counterparty_cur_commitment_point: None,
1143 counterparty_prev_commitment_point: None,
1144 counterparty_node_id,
1146 counterparty_shutdown_scriptpubkey: None,
1148 commitment_secrets: CounterpartyCommitmentSecrets::new(),
1150 channel_update_status: ChannelUpdateStatus::Enabled,
1151 closing_signed_in_flight: false,
1153 announcement_sigs: None,
1155 #[cfg(any(test, fuzzing))]
1156 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
1157 #[cfg(any(test, fuzzing))]
1158 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
1160 workaround_lnd_bug_4006: None,
1161 sent_message_awaiting_response: None,
1163 latest_inbound_scid_alias: None,
1164 outbound_scid_alias,
1166 channel_pending_event_emitted: false,
1167 channel_ready_event_emitted: false,
1169 #[cfg(any(test, fuzzing))]
1170 historical_inbound_htlc_fulfills: HashSet::new(),
1175 pending_monitor_updates: Vec::new(),
1179 fn check_remote_fee<F: Deref, L: Deref>(fee_estimator: &LowerBoundedFeeEstimator<F>,
1180 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L)
1181 -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
1183 // We only bound the fee updates on the upper side to prevent completely absurd feerates,
1184 // always accepting up to 25 sat/vByte or 10x our fee estimator's "High Priority" fee.
1185 // We generally don't care too much if they set the feerate to something very high, but it
1186 // could result in the channel being useless due to everything being dust.
1187 let upper_limit = cmp::max(250 * 25,
1188 fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64 * 10);
1189 if feerate_per_kw as u64 > upper_limit {
1190 return Err(ChannelError::Close(format!("Peer's feerate much too high. Actual: {}. Our expected upper limit: {}", feerate_per_kw, upper_limit)));
1192 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background);
1193 // Some fee estimators round up to the next full sat/vbyte (ie 250 sats per kw), causing
1194 // occasional issues with feerate disagreements between an initiator that wants a feerate
1195 // of 1.1 sat/vbyte and a receiver that wants 1.1 rounded up to 2. Thus, we always add 250
1196 // sat/kw before the comparison here.
1197 if feerate_per_kw + 250 < lower_limit {
1198 if let Some(cur_feerate) = cur_feerate_per_kw {
1199 if feerate_per_kw > cur_feerate {
1201 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
1202 cur_feerate, feerate_per_kw);
1206 return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {} (- 250)", feerate_per_kw, lower_limit)));
1211 /// Creates a new channel from a remote sides' request for one.
1212 /// Assumes chain_hash has already been checked and corresponds with what we expect!
1213 pub fn new_from_req<ES: Deref, SP: Deref, F: Deref, L: Deref>(
1214 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
1215 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
1216 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
1217 current_chain_height: u32, logger: &L, outbound_scid_alias: u64
1218 ) -> Result<Channel<Signer>, ChannelError>
1219 where ES::Target: EntropySource,
1220 SP::Target: SignerProvider<Signer = Signer>,
1221 F::Target: FeeEstimator,
1224 let announced_channel = if (msg.channel_flags & 1) == 1 { true } else { false };
1226 // First check the channel type is known, failing before we do anything else if we don't
1227 // support this channel type.
1228 let channel_type = if let Some(channel_type) = &msg.channel_type {
1229 if channel_type.supports_any_optional_bits() {
1230 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
1233 // We only support the channel types defined by the `ChannelManager` in
1234 // `provided_channel_type_features`. The channel type must always support
1235 // `static_remote_key`.
1236 if !channel_type.requires_static_remote_key() {
1237 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
1239 // Make sure we support all of the features behind the channel type.
1240 if !channel_type.is_subset(our_supported_features) {
1241 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
1243 if channel_type.requires_scid_privacy() && announced_channel {
1244 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
1246 channel_type.clone()
1248 let channel_type = ChannelTypeFeatures::from_init(&their_features);
1249 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
1250 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
1254 let opt_anchors = channel_type.supports_anchors_zero_fee_htlc_tx();
1256 let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
1257 let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
1258 let pubkeys = holder_signer.pubkeys().clone();
1259 let counterparty_pubkeys = ChannelPublicKeys {
1260 funding_pubkey: msg.funding_pubkey,
1261 revocation_basepoint: msg.revocation_basepoint,
1262 payment_point: msg.payment_point,
1263 delayed_payment_basepoint: msg.delayed_payment_basepoint,
1264 htlc_basepoint: msg.htlc_basepoint
1267 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
1268 return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
1271 // Check sanity of message fields:
1272 if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
1273 return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
1275 if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
1276 return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
1278 if msg.channel_reserve_satoshis > msg.funding_satoshis {
1279 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
1281 let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
1282 if msg.push_msat > full_channel_value_msat {
1283 return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
1285 if msg.dust_limit_satoshis > msg.funding_satoshis {
1286 return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
1288 if msg.htlc_minimum_msat >= full_channel_value_msat {
1289 return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
1291 Channel::<Signer>::check_remote_fee(fee_estimator, msg.feerate_per_kw, None, logger)?;
1293 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
1294 if msg.to_self_delay > max_counterparty_selected_contest_delay {
1295 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
1297 if msg.max_accepted_htlcs < 1 {
1298 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
1300 if msg.max_accepted_htlcs > MAX_HTLCS {
1301 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
1304 // Now check against optional parameters as set by config...
1305 if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
1306 return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
1308 if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
1309 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
1311 if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
1312 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
1314 if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
1315 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
1317 if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
1318 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
1320 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
1321 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
1323 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
1324 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
1327 // Convert things into internal flags and prep our state:
1329 if config.channel_handshake_limits.force_announced_channel_preference {
1330 if config.channel_handshake_config.announced_channel != announced_channel {
1331 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
1335 let holder_selected_channel_reserve_satoshis = Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
1336 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
1337 // Protocol level safety check in place, although it should never happen because
1338 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
1339 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
1341 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
1342 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
1344 if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
1345 log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
1346 msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
1348 if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
1349 return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
1352 // check if the funder's amount for the initial commitment tx is sufficient
1353 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
1354 let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
1355 let commitment_tx_fee = Self::commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, opt_anchors) / 1000;
1356 if funders_amount_msat / 1000 < commitment_tx_fee {
1357 return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", funders_amount_msat / 1000, commitment_tx_fee)));
1360 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee;
1361 // While it's reasonable for us to not meet the channel reserve initially (if they don't
1362 // want to push much to us), our counterparty should always have more than our reserve.
1363 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
1364 return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
1367 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
1368 match &msg.shutdown_scriptpubkey {
1369 &Some(ref script) => {
1370 // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
1371 if script.len() == 0 {
1374 if !script::is_bolt2_compliant(&script, their_features) {
1375 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
1377 Some(script.clone())
1380 // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
1382 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
1387 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
1388 match signer_provider.get_shutdown_scriptpubkey() {
1389 Ok(scriptpubkey) => Some(scriptpubkey),
1390 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
1394 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
1395 if !shutdown_scriptpubkey.is_compatible(&their_features) {
1396 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
1400 let destination_script = match signer_provider.get_destination_script() {
1401 Ok(script) => script,
1402 Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
1405 let mut secp_ctx = Secp256k1::new();
1406 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
1408 let chan = Channel {
1411 config: LegacyChannelConfig {
1412 options: config.channel_config.clone(),
1414 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
1419 inbound_handshake_limits_override: None,
1421 channel_id: msg.temporary_channel_id,
1422 temporary_channel_id: Some(msg.temporary_channel_id),
1423 channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
1424 announcement_sigs_state: AnnouncementSigsState::NotSent,
1427 latest_monitor_update_id: 0,
1430 shutdown_scriptpubkey,
1433 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
1434 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
1435 value_to_self_msat: msg.push_msat,
1437 pending_inbound_htlcs: Vec::new(),
1438 pending_outbound_htlcs: Vec::new(),
1439 holding_cell_htlc_updates: Vec::new(),
1440 pending_update_fee: None,
1441 holding_cell_update_fee: None,
1442 next_holder_htlc_id: 0,
1443 next_counterparty_htlc_id: 0,
1444 update_time_counter: 1,
1446 resend_order: RAACommitmentOrder::CommitmentFirst,
1448 monitor_pending_channel_ready: false,
1449 monitor_pending_revoke_and_ack: false,
1450 monitor_pending_commitment_signed: false,
1451 monitor_pending_forwards: Vec::new(),
1452 monitor_pending_failures: Vec::new(),
1453 monitor_pending_finalized_fulfills: Vec::new(),
1455 #[cfg(debug_assertions)]
1456 holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
1457 #[cfg(debug_assertions)]
1458 counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
1460 last_sent_closing_fee: None,
1461 pending_counterparty_closing_signed: None,
1462 closing_fee_limits: None,
1463 target_closing_feerate_sats_per_kw: None,
1465 inbound_awaiting_accept: true,
1467 funding_tx_confirmed_in: None,
1468 funding_tx_confirmation_height: 0,
1469 short_channel_id: None,
1470 channel_creation_height: current_chain_height,
1472 feerate_per_kw: msg.feerate_per_kw,
1473 channel_value_satoshis: msg.funding_satoshis,
1474 counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
1475 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
1476 counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
1477 holder_max_htlc_value_in_flight_msat: Self::get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
1478 counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
1479 holder_selected_channel_reserve_satoshis,
1480 counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
1481 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
1482 counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
1483 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
1484 minimum_depth: Some(cmp::max(config.channel_handshake_config.minimum_depth, 1)),
1486 counterparty_forwarding_info: None,
1488 channel_transaction_parameters: ChannelTransactionParameters {
1489 holder_pubkeys: pubkeys,
1490 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
1491 is_outbound_from_holder: false,
1492 counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
1493 selected_contest_delay: msg.to_self_delay,
1494 pubkeys: counterparty_pubkeys,
1496 funding_outpoint: None,
1497 opt_anchors: if opt_anchors { Some(()) } else { None },
1498 opt_non_zero_fee_anchors: None
1500 funding_transaction: None,
1502 counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
1503 counterparty_prev_commitment_point: None,
1504 counterparty_node_id,
1506 counterparty_shutdown_scriptpubkey,
1508 commitment_secrets: CounterpartyCommitmentSecrets::new(),
1510 channel_update_status: ChannelUpdateStatus::Enabled,
1511 closing_signed_in_flight: false,
1513 announcement_sigs: None,
1515 #[cfg(any(test, fuzzing))]
1516 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
1517 #[cfg(any(test, fuzzing))]
1518 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
1520 workaround_lnd_bug_4006: None,
1521 sent_message_awaiting_response: None,
1523 latest_inbound_scid_alias: None,
1524 outbound_scid_alias,
1526 channel_pending_event_emitted: false,
1527 channel_ready_event_emitted: false,
1529 #[cfg(any(test, fuzzing))]
1530 historical_inbound_htlc_fulfills: HashSet::new(),
1535 pending_monitor_updates: Vec::new(),
1541 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1542 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1543 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1544 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
1545 /// such, a transaction is generally the result of b increasing the amount paid to a (or adding
1547 /// @local is used only to convert relevant internal structures which refer to remote vs local
1548 /// to decide value of outputs and direction of HTLCs.
1549 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1550 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1551 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1552 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1553 /// which peer generated this transaction and "to whom" this transaction flows.
/// Builds the commitment transaction for the given `commitment_number`/`keys` and returns it
/// (together with fee, balance and included-HTLC stats) as a `CommitmentStats`.
1555 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1556 where L::Target: Logger
// Pending HTLCs are partitioned into those that get their own output on the commitment
// transaction (non-dust) and those below the broadcaster's dust limit plus the HTLC-tx fee
// (dust), which are only tracked for stats/balance purposes.
1558 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1559 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1560 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
// The dust limit of whichever side would broadcast this transaction decides which HTLCs and
// balance outputs are trimmed.
1562 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1563 let mut remote_htlc_total_msat = 0;
1564 let mut local_htlc_total_msat = 0;
// Signed adjustment applied to value_to_self below, accumulating in-flight fulfills/removals
// that are not yet reflected in self.value_to_self_msat.
1565 let mut value_to_self_msat_offset = 0;
// If a fee update is pending, apply the new feerate only when its state matches the same
// inclusion criteria used for pending HTLCs below (see the per-state comments).
1567 let mut feerate_per_kw = self.feerate_per_kw;
1568 if let Some((feerate, update_state)) = self.pending_update_fee {
1569 if match update_state {
1570 // Note that these match the inclusion criteria when scanning
1571 // pending_inbound_htlcs below.
1572 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1573 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1574 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
1576 feerate_per_kw = feerate;
1580 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1581 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
1582 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
1583 log_bytes!(self.channel_id), if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
// Builds an HTLCOutputInCommitment for a pending HTLC; the output index is filled in later
// by the transaction builder (see the sort on transaction_output_index below).
1585 macro_rules! get_htlc_in_commitment {
1586 ($htlc: expr, $offered: expr) => {
1587 HTLCOutputInCommitment {
1589 amount_msat: $htlc.amount_msat,
1590 cltv_expiry: $htlc.cltv_expiry,
1591 payment_hash: $htlc.payment_hash,
1592 transaction_output_index: None
// Classifies one HTLC as dust or non-dust from the broadcaster's point of view and pushes it
// to the matching list. Offered HTLCs are weighed with the timeout tx, received ones with the
// success tx, since that is the second-stage tx the broadcaster would have to get confirmed.
1597 macro_rules! add_htlc_output {
1598 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1599 if $outbound == local { // "offered HTLC output"
1600 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
1601 let htlc_tx_fee = if self.opt_anchors() {
1604 feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000
1606 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1607 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat);
1608 included_non_dust_htlcs.push((htlc_in_tx, $source));
1610 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat);
1611 included_dust_htlcs.push((htlc_in_tx, $source));
1614 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
1615 let htlc_tx_fee = if self.opt_anchors() {
1618 feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000
1620 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1621 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat);
1622 included_non_dust_htlcs.push((htlc_in_tx, $source));
1624 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat);
1625 included_dust_htlcs.push((htlc_in_tx, $source));
// Inbound HTLCs: include based on state and who is generating this transaction. An HTLC the
// remote announced but we have not yet committed to only appears in their transactions.
1631 for ref htlc in self.pending_inbound_htlcs.iter() {
1632 let (include, state_name) = match htlc.state {
1633 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1634 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1635 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1636 InboundHTLCState::Committed => (true, "Committed"),
1637 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1641 add_htlc_output!(htlc, false, None, state_name);
1642 remote_htlc_total_msat += htlc.amount_msat;
1644 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, log_bytes!(htlc.payment_hash.0), htlc.amount_msat, state_name);
// A locally-removed, fulfilled inbound HTLC that is excluded from our transaction still
// credits its value to us here.
1646 &InboundHTLCState::LocalRemoved(ref reason) => {
1647 if generated_by_local {
1648 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
1649 value_to_self_msat_offset += htlc.amount_msat as i64;
// Preimages for outbound HTLCs the remote has claimed; collected below and handed to the
// CommitmentStats (per the fields set at the end of this function).
1658 let mut preimages: Vec<PaymentPreimage> = Vec::new();
// Outbound HTLCs: mirror of the inbound loop, with inclusion keyed on generated_by_local.
1660 for ref htlc in self.pending_outbound_htlcs.iter() {
1661 let (include, state_name) = match htlc.state {
1662 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1663 OutboundHTLCState::Committed => (true, "Committed"),
1664 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1665 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1666 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1669 let preimage_opt = match htlc.state {
1670 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1671 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1672 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1676 if let Some(preimage) = preimage_opt {
1677 preimages.push(preimage);
1681 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1682 local_htlc_total_msat += htlc.amount_msat;
1684 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, log_bytes!(htlc.payment_hash.0), htlc.amount_msat, state_name);
// Excluded-but-claimed outbound HTLCs debit our balance even though they have no output.
1686 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1687 value_to_self_msat_offset -= htlc.amount_msat as i64;
1689 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1690 if !generated_by_local {
1691 value_to_self_msat_offset -= htlc.amount_msat as i64;
1699 let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1700 assert!(value_to_self_msat >= 0);
1701 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
1702 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
1703 // "violate" their reserve value by counting those against it. Thus, we have to convert
1704 // everything to i64 before subtracting as otherwise we can overflow.
1705 let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
1706 assert!(value_to_remote_msat >= 0);
1708 #[cfg(debug_assertions)]
1710 // Make sure that the to_self/to_remote is always either past the appropriate
1711 // channel_reserve *or* it is making progress towards it.
1712 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
1713 self.holder_max_commitment_tx_output.lock().unwrap()
1715 self.counterparty_max_commitment_tx_output.lock().unwrap()
1717 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
1718 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
1719 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
1720 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
// The funder (outbound side) pays the commitment fee and, when anchors are in use, both
// anchor outputs — deducted from its balance only.
1723 let total_fee_sat = Channel::<Signer>::commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), self.channel_transaction_parameters.opt_anchors.is_some());
1724 let anchors_val = if self.channel_transaction_parameters.opt_anchors.is_some() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
1725 let (value_to_self, value_to_remote) = if self.is_outbound() {
1726 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
1728 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
// Re-orient the balances/keys to the broadcaster's perspective ("a" broadcasts).
1731 let mut value_to_a = if local { value_to_self } else { value_to_remote };
1732 let mut value_to_b = if local { value_to_remote } else { value_to_self };
1733 let (funding_pubkey_a, funding_pubkey_b) = if local {
1734 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
1736 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
// Balance outputs below the broadcaster's dust limit are dropped entirely.
1739 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
1740 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
1745 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
1746 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
1751 let num_nondust_htlcs = included_non_dust_htlcs.len();
1753 let channel_parameters =
1754 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
1755 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
// The builder assigns transaction_output_index to each non-dust HTLC in place.
1756 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
1759 self.channel_transaction_parameters.opt_anchors.is_some(),
1764 &mut included_non_dust_htlcs,
// Stats list: non-dust HTLCs in output order, followed by the dust HTLCs (index None).
1767 let mut htlcs_included = included_non_dust_htlcs;
1768 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
1769 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
1770 htlcs_included.append(&mut included_dust_htlcs);
1772 // For the stats, trim the balances (in msat) to 0 when below the dust limit accordingly
// NOTE(review): this compares value_to_self_msat * 1000 against a limit denominated in
// satoshis, which mixes units (msat*1000 vs sat) — confirm the intended trim threshold
// before relying on these stat balances.
1773 value_to_self_msat = if (value_to_self_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat };
1774 value_to_remote_msat = if (value_to_remote_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat };
1782 local_balance_msat: value_to_self_msat as u64,
1783 remote_balance_msat: value_to_remote_msat as u64,
/// Returns the scriptpubkey our side will close to.
///
/// Panics (via the `unwrap`) if `self.shutdown_scriptpubkey` has not been set yet — callers
/// must only invoke this once a shutdown script exists, per the comment below.
1789 fn get_closing_scriptpubkey(&self) -> Script {
1790 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
1791 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
1792 // outside of those situations will fail.
1793 self.shutdown_scriptpubkey.clone().unwrap().into_inner()
/// Estimates the weight of the cooperative-close transaction, summing the non-witness bytes
/// (multiplied by 4 per the segwit weight formula) and the witness data (funding redeemscript
/// plus two 71-byte signatures with sighash flags).
///
/// `a_scriptpubkey`/`b_scriptpubkey` are the optional close outputs; passing `None` omits that
/// output from the estimate (it was trimmed as dust).
1797 fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
1802 1 + // script length (0)
1806 )*4 + // * 4 for non-witness parts
1807 2 + // witness marker and flag
1808 1 + // witness element count
1809 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
1810 self.get_funding_redeemscript().len() as u64 + // funding witness script
1811 2*(1 + 71); // two signatures + sighash type flags
// Each included output adds its value (8 bytes) + script-length byte + scriptpubkey, all
// non-witness data, hence the * 4 multiplier.
1812 if let Some(spk) = a_scriptpubkey {
1813 ret += ((8+1) + // output values and script length
1814 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
1816 if let Some(spk) = b_scriptpubkey {
1817 ret += ((8+1) + // output values and script length
1818 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
/// Builds the cooperative-close transaction paying `proposed_total_fee_satoshis` in fees, and
/// returns it along with the fee actually used (which may exceed the proposal if the fee payer
/// cannot cover it, see below).
///
/// `skip_remote_output` forces the counterparty's output to be dropped regardless of value.
///
/// Panics if any HTLCs or a fee update are still pending, if no shutdown script has been set
/// for either side, or if the funding outpoint is unset (see `funding_outpoint`).
1824 fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
1825 assert!(self.pending_inbound_htlcs.is_empty());
1826 assert!(self.pending_outbound_htlcs.is_empty());
1827 assert!(self.pending_update_fee.is_none());
// The funder (outbound side) pays the closing fee out of its own balance.
1829 let mut total_fee_satoshis = proposed_total_fee_satoshis;
1830 let mut value_to_holder: i64 = (self.value_to_self_msat as i64) / 1000 - if self.is_outbound() { total_fee_satoshis as i64 } else { 0 };
1831 let mut value_to_counterparty: i64 = ((self.channel_value_satoshis * 1000 - self.value_to_self_msat) as i64 / 1000) - if self.is_outbound() { 0 } else { total_fee_satoshis as i64 };
// If the fee payer's balance went negative, the shortfall is absorbed into the fee (their
// output is dropped below); only the funder's side can go negative here, per the asserts.
1833 if value_to_holder < 0 {
1834 assert!(self.is_outbound());
1835 total_fee_satoshis += (-value_to_holder) as u64;
1836 } else if value_to_counterparty < 0 {
1837 assert!(!self.is_outbound());
1838 total_fee_satoshis += (-value_to_counterparty) as u64;
// Dust-trim each output against our own dust limit (or skip the remote output on request).
1841 if skip_remote_output || value_to_counterparty as u64 <= self.holder_dust_limit_satoshis {
1842 value_to_counterparty = 0;
1845 if value_to_holder as u64 <= self.holder_dust_limit_satoshis {
1846 value_to_holder = 0;
1849 assert!(self.shutdown_scriptpubkey.is_some());
1850 let holder_shutdown_script = self.get_closing_scriptpubkey();
1851 let counterparty_shutdown_script = self.counterparty_shutdown_scriptpubkey.clone().unwrap();
1852 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
1854 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
1855 (closing_transaction, total_fee_satoshis)
/// Returns the channel's funding outpoint.
///
/// Panics (via the `unwrap`) if the funding outpoint has not yet been set in
/// `channel_transaction_parameters` (i.e. before funding is established).
1858 fn funding_outpoint(&self) -> OutPoint {
1859 self.channel_transaction_parameters.funding_outpoint.unwrap()
1863 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1864 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1865 /// our counterparty!)
1866 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
1867 /// TODO Some magic rust shit to compile-time check this?
1868 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
// The per-commitment point for this commitment number comes from our own signer.
1869 let per_commitment_point = self.holder_signer.get_per_commitment_point(commitment_number, &self.secp_ctx);
1870 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1871 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1872 let counterparty_pubkeys = self.get_counterparty_pubkeys();
// Our delayed-payment/HTLC basepoints combined with the counterparty's revocation/HTLC
// basepoints: the broadcaster here is us, so revocation belongs to the counterparty.
1874 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1878 /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1879 /// will sign and send to our counterparty.
1880 /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created)
///
/// Panics (via the `unwrap`) if `counterparty_cur_commitment_point` is `None`, i.e. before we
/// have received the counterparty's current per-commitment point.
1881 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1882 //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
1883 //may see payments to it!
1884 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1885 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1886 let counterparty_pubkeys = self.get_counterparty_pubkeys();
// Mirror of build_holder_transaction_keys: the broadcaster is the counterparty, so their
// per-commitment point and delayed-payment basepoint are used, and the revocation basepoint
// is ours.
1888 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1891 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
1892 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1893 /// Panics if called before accept_channel/new_from_req
1894 pub fn get_funding_redeemscript(&self) -> Script {
// 2-of-2 multisig over our funding pubkey and the counterparty's.
1895 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
1898 /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
1901 /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
1902 /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
1904 /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
1906 pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
1907 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
1908 where L::Target: Logger {
1909 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
1910 // (see equivalent if condition there).
1911 assert!(self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0);
// Save and restore latest_monitor_update_id around the call so the ChannelMonitorUpdate
// that get_update_fulfill_htlc generates is effectively discarded (the caller applies the
// preimage to the monitor out-of-band instead, per the doc comment above).
1912 let mon_update_id = self.latest_monitor_update_id; // Forget the ChannelMonitor update
1913 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
1914 self.latest_monitor_update_id = mon_update_id;
1915 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
1916 assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
1920 fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
1921 // Either ChannelReady got set (which means it won't be unset) or there is no way any
1922 // caller thought we could have something claimed (cause we wouldn't have accepted in an
1923 // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
1925 if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
1926 panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
1928 assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0);
1930 let payment_hash_calc = PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).into_inner());
1932 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
1933 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
1934 // these, but for now we just have to treat them as normal.
1936 let mut pending_idx = core::usize::MAX;
1937 let mut htlc_value_msat = 0;
1938 for (idx, htlc) in self.pending_inbound_htlcs.iter().enumerate() {
1939 if htlc.htlc_id == htlc_id_arg {
1940 assert_eq!(htlc.payment_hash, payment_hash_calc);
1942 InboundHTLCState::Committed => {},
1943 InboundHTLCState::LocalRemoved(ref reason) => {
1944 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
1946 log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id()));
1947 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
1949 return UpdateFulfillFetch::DuplicateClaim {};
1952 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
1953 // Don't return in release mode here so that we can update channel_monitor
1957 htlc_value_msat = htlc.amount_msat;
1961 if pending_idx == core::usize::MAX {
1962 #[cfg(any(test, fuzzing))]
1963 // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
1964 // this is simply a duplicate claim, not previously failed and we lost funds.
1965 debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
1966 return UpdateFulfillFetch::DuplicateClaim {};
1969 // Now update local state:
1971 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
1972 // can claim it even if the channel hits the chain before we see their next commitment.
1973 self.latest_monitor_update_id += 1;
1974 let monitor_update = ChannelMonitorUpdate {
1975 update_id: self.latest_monitor_update_id,
1976 updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
1977 payment_preimage: payment_preimage_arg.clone(),
1981 if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
1982 // Note that this condition is the same as the assertion in
1983 // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
1984 // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
1985 // do not not get into this branch.
1986 for pending_update in self.holding_cell_htlc_updates.iter() {
1987 match pending_update {
1988 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
1989 if htlc_id_arg == htlc_id {
1990 // Make sure we don't leave latest_monitor_update_id incremented here:
1991 self.latest_monitor_update_id -= 1;
1992 #[cfg(any(test, fuzzing))]
1993 debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
1994 return UpdateFulfillFetch::DuplicateClaim {};
1997 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
1998 if htlc_id_arg == htlc_id {
1999 log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", log_bytes!(self.channel_id()));
2000 // TODO: We may actually be able to switch to a fulfill here, though its
2001 // rare enough it may not be worth the complexity burden.
2002 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2003 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2009 log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", log_bytes!(self.channel_id()), self.channel_state);
2010 self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
2011 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
2013 #[cfg(any(test, fuzzing))]
2014 self.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2015 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2017 #[cfg(any(test, fuzzing))]
2018 self.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2021 let htlc = &mut self.pending_inbound_htlcs[pending_idx];
2022 if let InboundHTLCState::Committed = htlc.state {
2024 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2025 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2027 log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id));
2028 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
2031 UpdateFulfillFetch::NewClaim {
2034 msg: Some(msgs::UpdateFulfillHTLC {
2035 channel_id: self.channel_id(),
2036 htlc_id: htlc_id_arg,
2037 payment_preimage: payment_preimage_arg,
// Claims an inbound HTLC via `get_update_fulfill_htlc` and, when that produced an
// `update_fulfill_htlc` message, also builds the accompanying commitment update, merging
// both into a single `ChannelMonitorUpdate` queued in `pending_monitor_updates`.
// Returns a reference to the queued monitor update on a new claim, or `DuplicateClaim`.
2042 pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
// A commitment-bearing monitor update may only be released if no queued update is blocked.
2043 let release_cs_monitor = self.pending_monitor_updates.iter().all(|upd| !upd.blocked);
2044 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
2045 UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
2046 // Even if we aren't supposed to let new monitor updates with commitment state
2047 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
2048 // matter what. Sadly, to push a new monitor update which flies before others
2049 // already queued, we have to insert it into the pending queue and update the
2050 // update_ids of all the following monitors.
2051 let unblocked_update_pos = if release_cs_monitor && msg.is_some() {
2052 let mut additional_update = self.build_commitment_no_status_check(logger);
2053 // build_commitment_no_status_check may bump latest_monitor_id but we want them
2054 // to be strictly increasing by one, so decrement it here.
2055 self.latest_monitor_update_id = monitor_update.update_id;
// Fold the commitment update's steps into the preimage update so both apply as one.
2056 monitor_update.updates.append(&mut additional_update.updates);
2057 self.pending_monitor_updates.push(PendingChannelMonitorUpdate {
2058 update: monitor_update, blocked: false,
2060 self.pending_monitor_updates.len() - 1
// Otherwise insert the preimage-only update just before the first blocked update (or
// at the end if none is blocked), taking over that position's update_id...
2062 let insert_pos = self.pending_monitor_updates.iter().position(|upd| upd.blocked)
2063 .unwrap_or(self.pending_monitor_updates.len());
2064 let new_mon_id = self.pending_monitor_updates.get(insert_pos)
2065 .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
2066 monitor_update.update_id = new_mon_id;
2067 self.pending_monitor_updates.insert(insert_pos, PendingChannelMonitorUpdate {
2068 update: monitor_update, blocked: false,
// ...then renumber every update after the insertion point so update_ids stay contiguous.
2070 for held_update in self.pending_monitor_updates.iter_mut().skip(insert_pos + 1) {
2071 held_update.update.update_id += 1;
2074 debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
// Queue the commitment update itself as blocked, to be released later.
2075 let update = self.build_commitment_no_status_check(logger);
2076 self.pending_monitor_updates.push(PendingChannelMonitorUpdate {
2077 update, blocked: true,
// Pause monitor updating; a commitment update is pending exactly when a message was built.
2082 self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
2083 UpdateFulfillCommitFetch::NewClaim {
2084 monitor_update: &self.pending_monitor_updates.get(unblocked_update_pos)
2085 .expect("We just pushed the monitor update").update,
2089 UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
2093 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2094 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2095 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2096 /// before we fail backwards.
2098 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2099 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2100 /// [`ChannelError::Ignore`].
2101 pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
2102 -> Result<(), ChannelError> where L::Target: Logger {
// Delegates to `fail_htlc` with `force_holding_cell` set to true, so the failure is always
// queued in the holding cell and an immediate `update_fail_htlc` message is never produced
// (hence the assert that the returned message option is `None`).
2103 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
2104 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2107 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2108 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2109 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2110 /// before we fail backwards.
2112 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2113 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2114 /// [`ChannelError::Ignore`].
2115 fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
2116 -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
// Caller bug if the channel isn't operational: failing an HTLC requires ChannelReady.
2117 if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2118 panic!("Was asked to fail an HTLC when channel was not in an operational state");
2120 assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0);
2122 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2123 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
2124 // these, but for now we just have to treat them as normal.
// Locate the pending inbound HTLC by id; usize::MAX is the "not found" sentinel.
2126 let mut pending_idx = core::usize::MAX;
2127 for (idx, htlc) in self.pending_inbound_htlcs.iter().enumerate() {
2128 if htlc.htlc_id == htlc_id_arg {
2130 InboundHTLCState::Committed => {},
2131 InboundHTLCState::LocalRemoved(ref reason) => {
// Already resolved: either previously fulfilled (fine to ignore) or double-failed (bug).
2132 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2134 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2139 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2140 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
2146 if pending_idx == core::usize::MAX {
2147 #[cfg(any(test, fuzzing))]
2148 // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
2149 // is simply a duplicate fail, not previously failed and we failed-back too early.
2150 debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
// If we're awaiting a revoke, disconnected, or mid monitor-update, the failure cannot go
// out immediately and must be placed in the holding cell.
2154 if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
2155 debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
2156 force_holding_cell = true;
2159 // Now update local state:
2160 if force_holding_cell {
// Scan the holding cell for a conflicting resolution of the same HTLC before queueing.
2161 for pending_update in self.holding_cell_htlc_updates.iter() {
2162 match pending_update {
2163 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2164 if htlc_id_arg == htlc_id {
2165 #[cfg(any(test, fuzzing))]
2166 debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2170 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2171 if htlc_id_arg == htlc_id {
2172 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2173 return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
2179 log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, log_bytes!(self.channel_id()));
2180 self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
2181 htlc_id: htlc_id_arg,
// Otherwise fail immediately: mark the HTLC LocalRemoved and return the wire message.
2187 log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, log_bytes!(self.channel_id()));
2189 let htlc = &mut self.pending_inbound_htlcs[pending_idx];
2190 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
2193 Ok(Some(msgs::UpdateFailHTLC {
2194 channel_id: self.channel_id(),
2195 htlc_id: htlc_id_arg,
2200 // Message handlers:
// Handles the counterparty's `accept_channel` on a channel we initiated: validates the
// message fields against protocol sanity limits and our configured `peer_limits`, then
// stores the counterparty's keys, limits, shutdown script, and first commitment point.
// On success the state advances to OurInitSent | TheirInitSent. All failures are
// `ChannelError::Close`.
2202 pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
// Use per-channel override limits when set, otherwise the caller-provided defaults.
2203 let peer_limits = if let Some(ref limits) = self.inbound_handshake_limits_override { limits } else { default_limits };
2205 // Check sanity of message fields:
2206 if !self.is_outbound() {
2207 return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
2209 if self.channel_state != ChannelState::OurInitSent as u32 {
2210 return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
// 21M BTC * 100M sat/BTC: dust limit above the total coin supply is nonsense.
2212 if msg.dust_limit_satoshis > 21000000 * 100000000 {
2213 return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
2215 if msg.channel_reserve_satoshis > self.channel_value_satoshis {
2216 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.channel_value_satoshis)));
2218 if msg.dust_limit_satoshis > self.holder_selected_channel_reserve_satoshis {
2219 return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.holder_selected_channel_reserve_satoshis)));
2221 if msg.channel_reserve_satoshis > self.channel_value_satoshis - self.holder_selected_channel_reserve_satoshis {
2222 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
2223 msg.channel_reserve_satoshis, self.channel_value_satoshis - self.holder_selected_channel_reserve_satoshis)));
// An htlc_minimum at or above the spendable channel value makes the channel unusable.
2225 let full_channel_value_msat = (self.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
2226 if msg.htlc_minimum_msat >= full_channel_value_msat {
2227 return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
2229 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
2230 if msg.to_self_delay > max_delay_acceptable {
2231 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
2233 if msg.max_accepted_htlcs < 1 {
2234 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
2236 if msg.max_accepted_htlcs > MAX_HTLCS {
2237 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
2240 // Now check against optional parameters as set by config...
2241 if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
2242 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
2244 if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
2245 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
2247 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
2248 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
2250 if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
2251 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
2253 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
2254 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
2256 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
2257 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
2259 if msg.minimum_depth > peer_limits.max_minimum_depth {
2260 return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
// An explicit channel_type must echo back exactly what we sent in open_channel.
2263 if let Some(ty) = &msg.channel_type {
2264 if *ty != self.channel_type {
2265 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
2267 } else if their_features.supports_channel_type() {
2268 // Assume they've accepted the channel type as they said they understand it.
2270 let channel_type = ChannelTypeFeatures::from_init(&their_features);
2271 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
2272 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
2274 self.channel_type = channel_type;
2277 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
2278 match &msg.shutdown_scriptpubkey {
2279 &Some(ref script) => {
2280 // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
2281 if script.len() == 0 {
2284 if !script::is_bolt2_compliant(&script, their_features) {
2285 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
2287 Some(script.clone())
2290 // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
2292 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
// All checks passed; record the counterparty's limits and keys.
2297 self.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
2298 self.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.channel_value_satoshis * 1000);
2299 self.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
2300 self.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
2301 self.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
// With 0conf trust we accept their depth verbatim (possibly 0); otherwise require >= 1.
2303 if peer_limits.trust_own_funding_0conf {
2304 self.minimum_depth = Some(msg.minimum_depth);
2306 self.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
2309 let counterparty_pubkeys = ChannelPublicKeys {
2310 funding_pubkey: msg.funding_pubkey,
2311 revocation_basepoint: msg.revocation_basepoint,
2312 payment_point: msg.payment_point,
2313 delayed_payment_basepoint: msg.delayed_payment_basepoint,
2314 htlc_basepoint: msg.htlc_basepoint
2317 self.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
2318 selected_contest_delay: msg.to_self_delay,
2319 pubkeys: counterparty_pubkeys,
2322 self.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
2323 self.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
2325 self.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
2326 self.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
// Verifies the counterparty's signature (received in `funding_created`) over our initial
// holder commitment transaction, then builds and signs the counterparty's initial
// commitment transaction. Returns (counterparty commitment txid, our initial holder
// commitment tx, our signature over the counterparty's commitment) or a
// `ChannelError::Close` on a bad signature / signer failure.
2331 fn funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<(Txid, CommitmentTransaction, Signature), ChannelError> where L::Target: Logger {
2332 let funding_script = self.get_funding_redeemscript();
2334 let keys = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number);
2335 let initial_commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
2337 let trusted_tx = initial_commitment_tx.trust();
2338 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
// SIGHASH_ALL digest over the funding redeemscript for the holder commitment tx.
2339 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.channel_value_satoshis);
2340 // They sign the holder commitment transaction...
2341 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
2342 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.counterparty_funding_pubkey().serialize()),
2343 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
2344 encode::serialize_hex(&funding_script), log_bytes!(self.channel_id()));
// secp_check! converts a verification failure into a ChannelError::Close.
2345 secp_check!(self.secp_ctx.verify_ecdsa(&sighash, &sig, self.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
2348 let counterparty_keys = self.build_remote_transaction_keys();
2349 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
2351 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2352 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2353 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2354 log_bytes!(self.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2356 let counterparty_signature = self.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx)
2357 .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0;
2359 // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
2360 Ok((counterparty_initial_bitcoin_tx.txid, initial_commitment_tx, counterparty_signature))
// Convenience accessor: the counterparty's funding pubkey out of their channel public keys.
2363 fn counterparty_funding_pubkey(&self) -> &PublicKey {
2364 &self.get_counterparty_pubkeys().funding_pubkey
// Handles a `funding_created` message on an inbound channel: validates state, records the
// funding outpoint, verifies/produces the initial commitment signatures via
// `funding_created_signature`, creates the `ChannelMonitor`, and advances to FundingSent.
// Returns the `funding_signed` reply together with the new monitor.
2367 pub fn funding_created<SP: Deref, L: Deref>(
2368 &mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
2369 ) -> Result<(msgs::FundingSigned, ChannelMonitor<Signer>), ChannelError>
2371 SP::Target: SignerProvider<Signer = Signer>,
2374 if self.is_outbound() {
2375 return Err(ChannelError::Close("Received funding_created for an outbound channel?".to_owned()));
2377 if self.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
2378 // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
2379 // remember the channel, so it's safe to just send an error_message here and drop the
2381 return Err(ChannelError::Close("Received funding_created after we got the channel!".to_owned()));
2383 if self.inbound_awaiting_accept {
2384 return Err(ChannelError::Close("FundingCreated message received before the channel was accepted".to_owned()));
// Fresh channel invariant: no secrets seen and both commitment numbers still initial.
2386 if self.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
2387 self.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
2388 self.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
2389 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
2392 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
2393 self.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
2394 // This is an externally observable change before we finish all our checks. In particular
2395 // funding_created_signature may fail.
2396 self.holder_signer.provide_channel_parameters(&self.channel_transaction_parameters);
2398 let (counterparty_initial_commitment_txid, initial_commitment_tx, signature) = match self.funding_created_signature(&msg.signature, logger) {
2400 Err(ChannelError::Close(e)) => {
// Roll back the funding outpoint we optimistically set above before bailing.
2401 self.channel_transaction_parameters.funding_outpoint = None;
2402 return Err(ChannelError::Close(e));
2405 // The only error we know how to handle is ChannelError::Close, so we fall over here
2406 // to make sure we don't continue with an inconsistent state.
2407 panic!("unexpected error type from funding_created_signature {:?}", e);
2411 let holder_commitment_tx = HolderCommitmentTransaction::new(
2412 initial_commitment_tx,
2415 &self.get_holder_pubkeys().funding_pubkey,
2416 self.counterparty_funding_pubkey()
2419 self.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new())
2420 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
2422 // Now that we're past error-generating stuff, update our local state:
2424 let funding_redeemscript = self.get_funding_redeemscript();
2425 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
2426 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound());
2427 let shutdown_script = self.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
// The monitor gets its own signer instance, parameterized identically to ours.
2428 let mut monitor_signer = signer_provider.derive_channel_signer(self.channel_value_satoshis, self.channel_keys_id);
2429 monitor_signer.provide_channel_parameters(&self.channel_transaction_parameters);
2430 let channel_monitor = ChannelMonitor::new(self.secp_ctx.clone(), monitor_signer,
2431 shutdown_script, self.get_holder_selected_contest_delay(),
2432 &self.destination_script, (funding_txo, funding_txo_script.clone()),
2433 &self.channel_transaction_parameters,
2434 funding_redeemscript.clone(), self.channel_value_satoshis,
2436 holder_commitment_tx, best_block, self.counterparty_node_id);
2438 channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_commitment_txid, Vec::new(), self.cur_counterparty_commitment_transaction_number, self.counterparty_cur_commitment_point.unwrap(), logger);
2440 self.channel_state = ChannelState::FundingSent as u32;
// From here on the channel id is derived from the funding outpoint.
2441 self.channel_id = funding_txo.to_channel_id();
2442 self.cur_counterparty_commitment_transaction_number -= 1;
2443 self.cur_holder_commitment_transaction_number -= 1;
2445 log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.channel_id()));
2447 let need_channel_ready = self.check_get_channel_ready(0).is_some();
2448 self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
2450 Ok((msgs::FundingSigned {
2451 channel_id: self.channel_id,
2454 partial_signature_with_nonce: None,
2455 }, channel_monitor))
2458 /// Handles a funding_signed message from the remote end.
2459 /// If this call is successful, broadcast the funding transaction (and not before!)
2460 pub fn funding_signed<SP: Deref, L: Deref>(
2461 &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
2462 ) -> Result<ChannelMonitor<Signer>, ChannelError>
2464 SP::Target: SignerProvider<Signer = Signer>,
2467 if !self.is_outbound() {
2468 return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
// Ignoring the MonitorUpdateInProgress bit, we must be exactly in FundingCreated.
2470 if self.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 {
2471 return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
// Fresh channel invariant: no secrets seen and both commitment numbers still initial.
2473 if self.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
2474 self.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
2475 self.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
2476 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
2479 let funding_script = self.get_funding_redeemscript();
2481 let counterparty_keys = self.build_remote_transaction_keys();
2482 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
2483 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2484 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2486 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2487 log_bytes!(self.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2489 let holder_signer = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number);
2490 let initial_commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
2492 let trusted_tx = initial_commitment_tx.trust();
2493 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
// SIGHASH_ALL digest over the funding redeemscript for our initial commitment tx.
2494 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.channel_value_satoshis);
2495 // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
2496 if let Err(_) = self.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.get_counterparty_pubkeys().funding_pubkey) {
2497 return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
2501 let holder_commitment_tx = HolderCommitmentTransaction::new(
2502 initial_commitment_tx,
2505 &self.get_holder_pubkeys().funding_pubkey,
2506 self.counterparty_funding_pubkey()
2509 self.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new())
2510 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
2513 let funding_redeemscript = self.get_funding_redeemscript();
2514 let funding_txo = self.get_funding_txo().unwrap();
2515 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
2516 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound());
2517 let shutdown_script = self.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
// The monitor gets its own signer instance, parameterized identically to ours.
2518 let mut monitor_signer = signer_provider.derive_channel_signer(self.channel_value_satoshis, self.channel_keys_id);
2519 monitor_signer.provide_channel_parameters(&self.channel_transaction_parameters);
2520 let channel_monitor = ChannelMonitor::new(self.secp_ctx.clone(), monitor_signer,
2521 shutdown_script, self.get_holder_selected_contest_delay(),
2522 &self.destination_script, (funding_txo, funding_txo_script),
2523 &self.channel_transaction_parameters,
2524 funding_redeemscript.clone(), self.channel_value_satoshis,
2526 holder_commitment_tx, best_block, self.counterparty_node_id);
2528 channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_bitcoin_tx.txid, Vec::new(), self.cur_counterparty_commitment_transaction_number, self.counterparty_cur_commitment_point.unwrap(), logger);
2530 assert_eq!(self.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have no had any monitor(s) yet to fail update!
2531 self.channel_state = ChannelState::FundingSent as u32;
2532 self.cur_holder_commitment_transaction_number -= 1;
2533 self.cur_counterparty_commitment_transaction_number -= 1;
2535 log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.channel_id()));
2537 let need_channel_ready = self.check_get_channel_ready(0).is_some();
2538 self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
2542 /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
2543 /// and the channel is now usable (and public), this may generate an announcement_signatures to
2545 pub fn channel_ready<NS: Deref, L: Deref>(
2546 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, genesis_block_hash: BlockHash,
2547 user_config: &UserConfig, best_block: &BestBlock, logger: &L
2548 ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
2550 NS::Target: NodeSigner,
// lnd may send channel_ready before channel_reestablish; stash the message and ignore.
2553 if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2554 self.workaround_lnd_bug_4006 = Some(msg.clone());
2555 return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
2558 if let Some(scid_alias) = msg.short_channel_id_alias {
2559 if Some(scid_alias) != self.short_channel_id {
2560 // The scid alias provided can be used to route payments *from* our counterparty,
2561 // i.e. can be used for inbound payments and provided in invoices, but is not used
2562 // when routing outbound payments.
2563 self.latest_inbound_scid_alias = Some(scid_alias);
2567 let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
// State transitions: their channel_ready either sets TheirChannelReady or, if we already
// sent ours, completes the handshake into ChannelReady.
2569 if non_shutdown_state == ChannelState::FundingSent as u32 {
2570 self.channel_state |= ChannelState::TheirChannelReady as u32;
2571 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
2572 self.channel_state = ChannelState::ChannelReady as u32 | (self.channel_state & MULTI_STATE_FLAGS);
2573 self.update_time_counter += 1;
2574 } else if self.channel_state & (ChannelState::ChannelReady as u32) != 0 ||
2575 // If we reconnected before sending our `channel_ready` they may still resend theirs:
2576 (self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) ==
2577 (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32))
2579 // They probably disconnected/reconnected and re-sent the channel_ready, which is
2580 // required, or they're sending a fresh SCID alias.
// A re-sent channel_ready must carry the same first per-commitment point; which stored
// point to compare against depends on how far the commitment number has advanced.
2581 let expected_point =
2582 if self.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
2583 // If they haven't ever sent an updated point, the point they send should match
2585 self.counterparty_cur_commitment_point
2586 } else if self.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
2587 // If we've advanced the commitment number once, the second commitment point is
2588 // at `counterparty_prev_commitment_point`, which is not yet revoked.
2589 debug_assert!(self.counterparty_prev_commitment_point.is_some());
2590 self.counterparty_prev_commitment_point
2592 // If they have sent updated points, channel_ready is always supposed to match
2593 // their "first" point, which we re-derive here.
2594 Some(PublicKey::from_secret_key(&self.secp_ctx, &SecretKey::from_slice(
2595 &self.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
2596 ).expect("We already advanced, so previous secret keys should have been validated already")))
2598 if expected_point != Some(msg.next_per_commitment_point) {
2599 return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
2603 return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
// Rotate the counterparty's commitment points: current becomes previous.
2606 self.counterparty_prev_commitment_point = self.counterparty_cur_commitment_point;
2607 self.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
2609 log_info!(logger, "Received channel_ready from peer for channel {}", log_bytes!(self.channel_id()));
2611 Ok(self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger))
2614 /// Returns transaction if there is pending funding transaction that is yet to broadcast
// Only returns the funding transaction while we are still in the FundingCreated state,
// i.e. before the funding flow completed; otherwise (elided else-branch) yields None.
2615 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
2616 if self.channel_state & (ChannelState::FundingCreated as u32) != 0 {
2617 self.funding_transaction.clone()
2623 /// Returns a HTLCStats about inbound pending htlcs
// Counts all pending inbound HTLCs and sums their value, additionally accumulating the
// msat value of HTLCs that would be "dust" (below the respective dust limit plus the
// second-stage tx fee) on the counterparty and holder commitment transactions.
// `outbound_feerate_update` feeds into `get_dust_buffer_feerate` for the non-anchors case.
2624 fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
2625 let mut stats = HTLCStats {
2626 pending_htlcs: self.pending_inbound_htlcs.len() as u32,
2627 pending_htlcs_value_msat: 0,
2628 on_counterparty_tx_dust_exposure_msat: 0,
2629 on_holder_tx_dust_exposure_msat: 0,
2630 holding_cell_msat: 0,
2631 on_holder_tx_holding_cell_htlcs_count: 0,
// With anchors the dust limits are not feerate-dependent (anchors arm elided here);
// without anchors, add the weight-based HTLC-timeout/HTLC-success tx fees at the
// buffered feerate.
2634 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.opt_anchors() {
2637 let dust_buffer_feerate = self.get_dust_buffer_feerate(outbound_feerate_update) as u64;
2638 (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000,
2639 dust_buffer_feerate * htlc_success_tx_weight(false) / 1000)
// Inbound HTLCs appear as HTLC-timeout claims on the counterparty tx (their dust limit)
// and HTLC-success claims on our tx (our dust limit).
2641 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.counterparty_dust_limit_satoshis;
2642 let holder_dust_limit_success_sat = htlc_success_dust_limit + self.holder_dust_limit_satoshis;
2643 for ref htlc in self.pending_inbound_htlcs.iter() {
2644 stats.pending_htlcs_value_msat += htlc.amount_msat;
2645 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
2646 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
2648 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
2649 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
2655 /// Returns a HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell.
// Mirror of `get_inbound_pending_htlc_stats` for outbound HTLCs, with the timeout/success
// roles swapped (outbound HTLCs are HTLC-success on the counterparty tx and HTLC-timeout
// on ours), and additionally folds in AddHTLC updates sitting in the holding cell.
2656 fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
2657 let mut stats = HTLCStats {
2658 pending_htlcs: self.pending_outbound_htlcs.len() as u32,
2659 pending_htlcs_value_msat: 0,
2660 on_counterparty_tx_dust_exposure_msat: 0,
2661 on_holder_tx_dust_exposure_msat: 0,
2662 holding_cell_msat: 0,
2663 on_holder_tx_holding_cell_htlcs_count: 0,
// Anchors arm elided; without anchors the dust thresholds include second-stage tx fees
// at the buffered feerate.
2666 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.opt_anchors() {
2669 let dust_buffer_feerate = self.get_dust_buffer_feerate(outbound_feerate_update) as u64;
2670 (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000,
2671 dust_buffer_feerate * htlc_success_tx_weight(false) / 1000)
2673 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + self.counterparty_dust_limit_satoshis;
2674 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.holder_dust_limit_satoshis;
2675 for ref htlc in self.pending_outbound_htlcs.iter() {
2676 stats.pending_htlcs_value_msat += htlc.amount_msat;
2677 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
2678 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
2680 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
2681 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
// Holding-cell adds count as pending outbound HTLCs too: they will be sent once the
// holding cell is freed, so they must be included in dust-exposure and count limits.
2685 for update in self.holding_cell_htlc_updates.iter() {
2686 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
2687 stats.pending_htlcs += 1;
2688 stats.pending_htlcs_value_msat += amount_msat;
2689 stats.holding_cell_msat += amount_msat;
2690 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
2691 stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
2693 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
2694 stats.on_holder_tx_dust_exposure_msat += amount_msat;
2696 stats.on_holder_tx_holding_cell_htlcs_count += 1;
2703 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
2704 /// Doesn't bother handling the
2705 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
2706 /// corner case properly.
// Computes our spendable/receivable balances, capping the next-HTLC limit by commitment
// tx fees (if we pay them), the counterparty's reserve, our dust-exposure limit, the
// counterparty's max-in-flight value, and their max-accepted-HTLC count.
// NOTE(review): several closing braces/else arms are elided in this chunk.
2707 pub fn get_available_balances(&self) -> AvailableBalances {
2708 // Note that we have to handle overflow due to the above case.
2709 let inbound_stats = self.get_inbound_pending_htlc_stats(None);
2710 let outbound_stats = self.get_outbound_pending_htlc_stats(None);
// balance_msat: our settled balance plus inbound HTLCs we've already claimed (fulfilled
// but not yet fully removed), minus all outbound in-flight value.
2712 let mut balance_msat = self.value_to_self_msat;
2713 for ref htlc in self.pending_inbound_htlcs.iter() {
2714 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
2715 balance_msat += htlc.amount_msat;
2718 balance_msat -= outbound_stats.pending_htlcs_value_msat;
// Outbound capacity: our balance minus in-flight outbound HTLCs minus the reserve the
// counterparty selected for us (saturating, per the overflow note above).
2720 let outbound_capacity_msat = self.value_to_self_msat
2721 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
2723 self.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
2725 let mut available_capacity_msat = outbound_capacity_msat;
2727 if self.is_outbound() {
2728 // We should mind channel commit tx fee when computing how much of the available capacity
2729 // can be used in the next htlc. Mirrors the logic in send_htlc.
2731 // The fee depends on whether the amount we will be sending is above dust or not,
2732 // and the answer will in turn change the amount itself — making it a circular
2734 // This complicates the computation around dust-values, up to the one-htlc-value.
2735 let mut real_dust_limit_timeout_sat = self.holder_dust_limit_satoshis;
2736 if !self.opt_anchors() {
2737 real_dust_limit_timeout_sat += self.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000;
// Reserve commit-tx fee (with fee-spike buffer) for one more HTLC, computed both for an
// above-dust and a just-below-dust candidate.
2740 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
2741 let max_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * self.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
2742 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
2743 let min_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * self.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
2745 // We will first subtract the fee as if we were above-dust. Then, if the resulting
2746 // value ends up being below dust, we have this fee available again. In that case,
2747 // match the value to right-below-dust.
2748 let mut capacity_minus_commitment_fee_msat: i64 = (available_capacity_msat as i64) - (max_reserved_commit_tx_fee_msat as i64);
2749 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
2750 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
2751 debug_assert!(one_htlc_difference_msat != 0);
2752 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
2753 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
2754 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
2756 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
2759 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
2760 // sending a new HTLC won't reduce their balance below our reserve threshold.
2761 let mut real_dust_limit_success_sat = self.counterparty_dust_limit_satoshis;
2762 if !self.opt_anchors() {
2763 real_dust_limit_success_sat += self.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000;
2766 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
2767 let max_reserved_commit_tx_fee_msat = self.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
2769 let holder_selected_chan_reserve_msat = self.holder_selected_channel_reserve_satoshis * 1000;
2770 let remote_balance_msat = (self.channel_value_satoshis * 1000 - self.value_to_self_msat)
2771 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
2773 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat {
2774 // If another HTLC's fee would reduce the remote's balance below the reserve limit
2775 // we've selected for them, we can only send dust HTLCs.
2776 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
2780 let mut next_outbound_htlc_minimum_msat = self.counterparty_htlc_minimum_msat;
2782 // If we get close to our maximum dust exposure, we end up in a situation where we can send
2783 // between zero and the remaining dust exposure limit remaining OR above the dust limit.
2784 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
2785 // send above the dust limit (as the router can always overpay to meet the dust limit).
2786 let mut remaining_msat_below_dust_exposure_limit = None;
2787 let mut dust_exposure_dust_limit_msat = 0;
2789 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if self.opt_anchors() {
2790 (self.counterparty_dust_limit_satoshis, self.holder_dust_limit_satoshis)
2792 let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
2793 (self.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(false) / 1000,
2794 self.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000)
// If even one more just-below-dust HTLC would overflow the dust-exposure limit on either
// commitment tx, record the remaining headroom and raise the effective HTLC minimum.
2796 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
2797 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > self.get_max_dust_htlc_exposure_msat() as i64 {
2798 remaining_msat_below_dust_exposure_limit =
2799 Some(self.get_max_dust_htlc_exposure_msat().saturating_sub(on_counterparty_dust_htlc_exposure_msat));
2800 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
2803 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
2804 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > self.get_max_dust_htlc_exposure_msat() as i64 {
2805 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
2806 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
2807 self.get_max_dust_htlc_exposure_msat().saturating_sub(on_holder_dust_htlc_exposure_msat)));
2808 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
2811 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
2812 if available_capacity_msat < dust_exposure_dust_limit_msat {
2813 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
2815 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
// Finally, respect the counterparty's max-in-flight value and max-accepted-HTLC count.
2819 available_capacity_msat = cmp::min(available_capacity_msat,
2820 self.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
2822 if outbound_stats.pending_htlcs + 1 > self.counterparty_max_accepted_htlcs as u32 {
2823 available_capacity_msat = 0;
2827 inbound_capacity_msat: cmp::max(self.channel_value_satoshis as i64 * 1000
2828 - self.value_to_self_msat as i64
2829 - self.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
2830 - self.holder_selected_channel_reserve_satoshis as i64 * 1000,
2832 outbound_capacity_msat,
2833 next_outbound_htlc_limit_msat: available_capacity_msat,
2834 next_outbound_htlc_minimum_msat,
// Returns (our selected reserve for the counterparty, the reserve they selected for us —
// `None` until we've received it from them).
2839 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
2840 (self.holder_selected_channel_reserve_satoshis, self.counterparty_selected_channel_reserve_satoshis)
2843 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2844 // Note that num_htlcs should not include dust HTLCs.
2845 fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, opt_anchors: bool) -> u64 {
2846 // Note that we need to divide before multiplying to round properly,
2847 // since the lowest denomination of bitcoin on-chain is the satoshi.
// weight * feerate / 1000 gives sats (rounded down); * 1000 converts to msat.
2848 (commitment_tx_base_weight(opt_anchors) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
2851 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2852 // Note that num_htlcs should not include dust HTLCs.
// Same weight formula as `commit_tx_fee_msat` but left in satoshis.
2854 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, opt_anchors: bool) -> u64 {
2855 feerate_per_kw as u64 * (commitment_tx_base_weight(opt_anchors) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
2858 /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
2859 /// number of pending HTLCs that are on track to be in our next commitment tx.
2861 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2862 /// `fee_spike_buffer_htlc` is `Some`.
2864 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2865 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2867 /// Dust HTLCs are excluded.
// Only valid when we funded the channel (we pay commitment fees) — asserted below.
// NOTE(review): this chunk is elided; some match arms and braces are not visible.
2868 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2869 assert!(self.is_outbound());
// Anchors arm elided; without anchors the dust limits include second-stage tx fees at
// the current feerate. Both thresholds here use *our* dust limit (our commitment tx).
2871 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if self.opt_anchors() {
2874 (self.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000,
2875 self.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000)
2877 let real_dust_limit_success_sat = htlc_success_dust_limit + self.holder_dust_limit_satoshis;
2878 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.holder_dust_limit_satoshis;
// addl_htlcs: the candidate (if non-dust on our tx) plus the optional fee-spike buffer.
2880 let mut addl_htlcs = 0;
2881 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2883 HTLCInitiator::LocalOffered => {
2884 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2888 HTLCInitiator::RemoteOffered => {
2889 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2895 let mut included_htlcs = 0;
2896 for ref htlc in self.pending_inbound_htlcs.iter() {
2897 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
2900 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
2901 // transaction including this HTLC if it times out before they RAA.
2902 included_htlcs += 1;
2905 for ref htlc in self.pending_outbound_htlcs.iter() {
2906 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
2910 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
2911 OutboundHTLCState::Committed => included_htlcs += 1,
2912 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2913 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
2914 // transaction won't be generated until they send us their next RAA, which will mean
2915 // dropping any HTLCs in this state.
// Holding-cell adds will be sent and appear in our next commitment tx, so count the
// non-dust ones too.
2920 for htlc in self.holding_cell_htlc_updates.iter() {
2922 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
2923 if amount_msat / 1000 < real_dust_limit_timeout_sat {
2928 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
2929 // ack we're guaranteed to never include them in commitment txs anymore.
2933 let num_htlcs = included_htlcs + addl_htlcs;
2934 let res = Self::commit_tx_fee_msat(self.feerate_per_kw, num_htlcs, self.opt_anchors());
// Test/fuzzing-only bookkeeping: cache the projected commitment-tx info (without the
// fee-spike buffer HTLC) for later cross-checking.
2935 #[cfg(any(test, fuzzing))]
2938 if fee_spike_buffer_htlc.is_some() {
2939 fee = Self::commit_tx_fee_msat(self.feerate_per_kw, num_htlcs - 1, self.opt_anchors());
2941 let total_pending_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len()
2942 + self.holding_cell_htlc_updates.len();
2943 let commitment_tx_info = CommitmentTxInfoCached {
2945 total_pending_htlcs,
2946 next_holder_htlc_id: match htlc.origin {
2947 HTLCInitiator::LocalOffered => self.next_holder_htlc_id + 1,
2948 HTLCInitiator::RemoteOffered => self.next_holder_htlc_id,
2950 next_counterparty_htlc_id: match htlc.origin {
2951 HTLCInitiator::LocalOffered => self.next_counterparty_htlc_id,
2952 HTLCInitiator::RemoteOffered => self.next_counterparty_htlc_id + 1,
2954 feerate: self.feerate_per_kw,
2956 *self.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2961 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
2962 /// pending HTLCs that are on track to be in their next commitment tx
2964 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2965 /// `fee_spike_buffer_htlc` is `Some`.
2967 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2968 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2970 /// Dust HTLCs are excluded.
// Only valid when the counterparty funded the channel (they pay commitment fees) —
// asserted below. Mirror of `next_local_commit_tx_fee_msat` for *their* commitment tx,
// so dust thresholds use the counterparty's dust limit and success/timeout roles swap.
// NOTE(review): elided chunk; some match arms and braces are not visible.
2971 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2972 assert!(!self.is_outbound());
2974 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if self.opt_anchors() {
2977 (self.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000,
2978 self.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000)
2980 let real_dust_limit_success_sat = htlc_success_dust_limit + self.counterparty_dust_limit_satoshis;
2981 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.counterparty_dust_limit_satoshis;
2983 let mut addl_htlcs = 0;
2984 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2986 HTLCInitiator::LocalOffered => {
2987 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2991 HTLCInitiator::RemoteOffered => {
2992 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2998 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
2999 // non-dust inbound HTLCs are included (as all states imply it will be included) and only
3000 // committed outbound HTLCs, see below.
3001 let mut included_htlcs = 0;
3002 for ref htlc in self.pending_inbound_htlcs.iter() {
3003 if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
3006 included_htlcs += 1;
3009 for ref htlc in self.pending_outbound_htlcs.iter() {
3010 if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
3013 // We only include outbound HTLCs if it will not be included in their next commitment_signed,
3014 // i.e. if they've responded to us with an RAA after announcement.
3016 OutboundHTLCState::Committed => included_htlcs += 1,
3017 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
3018 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
3023 let num_htlcs = included_htlcs + addl_htlcs;
3024 let res = Self::commit_tx_fee_msat(self.feerate_per_kw, num_htlcs, self.opt_anchors());
// Test/fuzzing-only bookkeeping, mirroring `next_local_commit_tx_fee_msat`.
3025 #[cfg(any(test, fuzzing))]
3028 if fee_spike_buffer_htlc.is_some() {
3029 fee = Self::commit_tx_fee_msat(self.feerate_per_kw, num_htlcs - 1, self.opt_anchors());
3031 let total_pending_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
3032 let commitment_tx_info = CommitmentTxInfoCached {
3034 total_pending_htlcs,
3035 next_holder_htlc_id: match htlc.origin {
3036 HTLCInitiator::LocalOffered => self.next_holder_htlc_id + 1,
3037 HTLCInitiator::RemoteOffered => self.next_holder_htlc_id,
3039 next_counterparty_htlc_id: match htlc.origin {
3040 HTLCInitiator::LocalOffered => self.next_counterparty_htlc_id,
3041 HTLCInitiator::RemoteOffered => self.next_counterparty_htlc_id + 1,
3043 feerate: self.feerate_per_kw,
3045 *self.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
// Handles an incoming `update_add_htlc`: validates the HTLC against channel state,
// amount/count/reserve/dust limits, then records it as RemoteAnnounced. Soft failures
// (shutdown started, dust-exposure or fee-spike-buffer overflow) downgrade
// `pending_forward_status` via `create_pending_htlc_status` rather than closing; hard
// protocol violations return `ChannelError::Close`. The validation ORDER is deliberate —
// do not reorder checks.
// NOTE(review): elided chunk; some closing braces/else arms are not visible.
3050 pub fn update_add_htlc<F, L: Deref>(&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus, create_pending_htlc_status: F, logger: &L) -> Result<(), ChannelError>
3051 where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus, L::Target: Logger {
3052 // We can't accept HTLCs sent after we've sent a shutdown.
3053 let local_sent_shutdown = (self.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
3054 if local_sent_shutdown {
// 0x4000|8 is a failure code passed through to the status-rewriting closure.
3055 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
3057 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
3058 let remote_sent_shutdown = (self.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
3059 if remote_sent_shutdown {
3060 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
3062 if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3063 return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
// Basic amount sanity: non-zero, at most the channel value, at least our htlc_minimum.
3065 if msg.amount_msat > self.channel_value_satoshis * 1000 {
3066 return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
3068 if msg.amount_msat == 0 {
3069 return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
3071 if msg.amount_msat < self.holder_htlc_minimum_msat {
3072 return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.holder_htlc_minimum_msat, msg.amount_msat)));
// Enforce our advertised max-accepted-HTLC count and max in-flight value.
3075 let inbound_stats = self.get_inbound_pending_htlc_stats(None);
3076 let outbound_stats = self.get_outbound_pending_htlc_stats(None);
3077 if inbound_stats.pending_htlcs + 1 > self.holder_max_accepted_htlcs as u32 {
3078 return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.holder_max_accepted_htlcs)));
3080 if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.holder_max_htlc_value_in_flight_msat {
3081 return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.holder_max_htlc_value_in_flight_msat)));
3083 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
3084 // the reserve_satoshis we told them to always have as direct payment so that they lose
3085 // something if we punish them for broadcasting an old state).
3086 // Note that we don't really care about having a small/no to_remote output in our local
3087 // commitment transactions, as the purpose of the channel reserve is to ensure we can
3088 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
3089 // present in the next commitment transaction we send them (at least for fulfilled ones,
3090 // failed ones won't modify value_to_self).
3091 // Note that we will send HTLCs which another instance of rust-lightning would think
3092 // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
3093 // Channel state once they will not be present in the next received commitment
3095 let mut removed_outbound_total_msat = 0;
3096 for ref htlc in self.pending_outbound_htlcs.iter() {
3097 if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
3098 removed_outbound_total_msat += htlc.amount_msat;
3099 } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
3100 removed_outbound_total_msat += htlc.amount_msat;
// Dust-exposure checks: if accepting this HTLC as dust would push either commitment tx
// over our max dust exposure, fail it (soft) with 0x1000|7 rather than closing.
3104 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.opt_anchors() {
3107 let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
3108 (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000,
3109 dust_buffer_feerate * htlc_success_tx_weight(false) / 1000)
3111 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.counterparty_dust_limit_satoshis;
3112 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
3113 let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
3114 if on_counterparty_tx_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() {
3115 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
3116 on_counterparty_tx_dust_htlc_exposure_msat, self.get_max_dust_htlc_exposure_msat());
3117 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3121 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.holder_dust_limit_satoshis;
3122 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
3123 let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
3124 if on_holder_tx_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() {
3125 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
3126 on_holder_tx_dust_htlc_exposure_msat, self.get_max_dust_htlc_exposure_msat());
3127 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
// Funds checks: the remote must be able to cover this HTLC, the commitment fee they pay
// (if non-outbound), and the reserve we require of them.
3131 let pending_value_to_self_msat =
3132 self.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
3133 let pending_remote_value_msat =
3134 self.channel_value_satoshis * 1000 - pending_value_to_self_msat;
3135 if pending_remote_value_msat < msg.amount_msat {
3136 return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
3139 // Check that the remote can afford to pay for this HTLC on-chain at the current
3140 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
3141 let remote_commit_tx_fee_msat = if self.is_outbound() { 0 } else {
3142 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3143 self.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
3145 if pending_remote_value_msat - msg.amount_msat < remote_commit_tx_fee_msat {
3146 return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
3149 if pending_remote_value_msat - msg.amount_msat - remote_commit_tx_fee_msat < self.holder_selected_channel_reserve_satoshis * 1000 {
3150 return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
3153 if !self.is_outbound() {
3154 // `2 *` and `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
3155 // the spec because in the spec, the fee spike buffer requirement doesn't exist on the
3156 // receiver's side, only on the sender's.
3157 // Note that when we eventually remove support for fee updates and switch to anchor output
3158 // fees, we will drop the `2 *`, since we no longer be as sensitive to fee spikes. But, keep
3159 // the extra htlc when calculating the next remote commitment transaction fee as we should
3160 // still be able to afford adding this HTLC plus one more future HTLC, regardless of being
3161 // sensitive to fee spikes.
3162 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3163 let remote_fee_cost_incl_stuck_buffer_msat = 2 * self.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
3164 if pending_remote_value_msat - msg.amount_msat - self.holder_selected_channel_reserve_satoshis * 1000 < remote_fee_cost_incl_stuck_buffer_msat {
3165 // Note that if the pending_forward_status is not updated here, then it's because we're already failing
3166 // the HTLC, i.e. its status is already set to failing.
3167 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", log_bytes!(self.channel_id()));
3168 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3171 // Check that they won't violate our local required channel reserve by adding this HTLC.
3172 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3173 let local_commit_tx_fee_msat = self.next_local_commit_tx_fee_msat(htlc_candidate, None);
3174 if self.value_to_self_msat < self.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat {
3175 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
// HTLC ids must be issued sequentially; CLTV values >= 500000000 are seconds-based
// timestamps per the spec and rejected.
3178 if self.next_counterparty_htlc_id != msg.htlc_id {
3179 return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.next_counterparty_htlc_id)));
3181 if msg.cltv_expiry >= 500000000 {
3182 return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
3185 if self.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
3186 if let PendingHTLCStatus::Forward(_) = pending_forward_status {
3187 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
3191 // Now update local state:
3192 self.next_counterparty_htlc_id += 1;
3193 self.pending_inbound_htlcs.push(InboundHTLCOutput {
3194 htlc_id: msg.htlc_id,
3195 amount_msat: msg.amount_msat,
3196 payment_hash: msg.payment_hash,
3197 cltv_expiry: msg.cltv_expiry,
3198 state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
3203 /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
// Looks up the outbound HTLC by id and transitions it Committed -> RemoteRemoved with
// the given outcome. Exactly one of `check_preimage` (fulfill path — preimage must hash
// to the HTLC's payment_hash) or `fail_reason` may be Some. Returns a reference to the
// updated HTLC, or Close errors for: bad preimage, removal before commitment, double
// removal, or unknown id.
3205 fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
3206 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
3207 for htlc in self.pending_outbound_htlcs.iter_mut() {
3208 if htlc.htlc_id == htlc_id {
3209 let outcome = match check_preimage {
3210 None => fail_reason.into(),
3211 Some(payment_preimage) => {
// Verify SHA256(preimage) == payment_hash before accepting the fulfill.
3212 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
3213 if payment_hash != htlc.payment_hash {
3214 return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
3216 OutboundHTLCOutcome::Success(Some(payment_preimage))
3220 OutboundHTLCState::LocalAnnounced(_) =>
3221 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
3222 OutboundHTLCState::Committed => {
3223 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
3225 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
3226 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
3231 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
3234 pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
3235 if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3236 return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
3238 if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3239 return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
3242 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
3245 pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3246 if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3247 return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
3249 if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3250 return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
3253 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3257 pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3258 if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3259 return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
3261 if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3262 return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
3265 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
/// Handles a peer's `commitment_signed` message: verifies their signature over
/// our next holder commitment transaction and over each non-dust HTLC
/// transaction, then advances pending HTLC/fee state and queues a
/// `ChannelMonitorUpdate` carrying the new holder commitment info.
///
/// Returns the (possibly blocked) monitor update to apply, or
/// `ChannelError::Close` on any protocol violation or invalid signature.
pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<&ChannelMonitorUpdate>, ChannelError>
	where L::Target: Logger
	// The channel must be operational, the peer must not owe us a
	// channel_reestablish, and closing_signed negotiation must not have begun.
	if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
		return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
	if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
		return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
	if self.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.last_sent_closing_fee.is_some() {
		return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));

	let funding_script = self.get_funding_redeemscript();

	let keys = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number);

	// Build the holder commitment transaction we expect the peer to have signed.
	let commitment_stats = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, false, logger);
	let commitment_txid = {
		let trusted_tx = commitment_stats.tx.trust();
		let bitcoin_tx = trusted_tx.built_transaction();
		let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.channel_value_satoshis);

		log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
			log_bytes!(msg.signature.serialize_compact()[..]),
			log_bytes!(self.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
			log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.channel_id()));
		// Verify the peer's signature over the full commitment transaction.
		if let Err(_) = self.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.counterparty_funding_pubkey()) {
			return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));

	let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();

	// If our counterparty updated the channel fee in this commitment transaction, check that
	// they can actually afford the new fee now.
	let update_fee = if let Some((_, update_state)) = self.pending_update_fee {
		update_state == FeeUpdateState::RemoteAnnounced
		// Only the (non-us) funder can announce a fee update; their remaining
		// balance must still cover the new fee on top of the reserve we require.
		debug_assert!(!self.is_outbound());
		let counterparty_reserve_we_require_msat = self.holder_selected_channel_reserve_satoshis * 1000;
		if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
			return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));

	// Test/fuzzing-only: cross-check the computed fee against the cached projection.
	#[cfg(any(test, fuzzing))]
	if self.is_outbound() {
		let projected_commit_tx_info = self.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
		*self.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
		if let Some(info) = projected_commit_tx_info {
			let total_pending_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len()
				+ self.holding_cell_htlc_updates.len();
			if info.total_pending_htlcs == total_pending_htlcs
				&& info.next_holder_htlc_id == self.next_holder_htlc_id
				&& info.next_counterparty_htlc_id == self.next_counterparty_htlc_id
				&& info.feerate == self.feerate_per_kw {
					assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);

	// Exactly one signature is required per non-dust HTLC output.
	if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
		return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));

	// Up to LDK 0.0.115, HTLC information was required to be duplicated in the
	// `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
	// in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
	// outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
	// backwards compatibility, we never use it in production. To provide test coverage, here,
	// we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
	#[allow(unused_assignments, unused_mut)]
	let mut separate_nondust_htlc_sources = false;
	#[cfg(all(feature = "std", any(test, fuzzing)))] {
		use core::hash::{BuildHasher, Hasher};
		// Get a random value using the only std API to do so - the DefaultHasher
		let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
		separate_nondust_htlc_sources = rand_val % 2 == 0;

	let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
	let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
	for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
		// Only non-dust HTLCs have an output (and thus a signature) to verify.
		if let Some(_) = htlc.transaction_output_index {
			let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
				self.get_counterparty_selected_contest_delay().unwrap(), &htlc, self.opt_anchors(),
				false, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);

			let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, self.opt_anchors(), &keys);
			// Anchor channels sign HTLC txs with SINGLE|ANYONECANPAY so extra fee
			// inputs can be attached later; otherwise plain SIGHASH_ALL.
			let htlc_sighashtype = if self.opt_anchors() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
			let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
			log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
				log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()),
				encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.channel_id()));
			if let Err(_) = self.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
				return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
			if !separate_nondust_htlc_sources {
				htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
			// Dust HTLCs carry no signature.
			htlcs_and_sigs.push((htlc, None, source_opt.take()));
		if separate_nondust_htlc_sources {
			if let Some(source) = source_opt.take() {
				nondust_htlc_sources.push(source);

		debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");

	let holder_commitment_tx = HolderCommitmentTransaction::new(
		commitment_stats.tx,
		msg.htlc_signatures.clone(),
		&self.get_holder_pubkeys().funding_pubkey,
		self.counterparty_funding_pubkey()

	// Give our signer a chance to reject the new commitment before we commit to it.
	self.holder_signer.validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages)
		.map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;

	// Update state now that we've passed all the can-fail calls...
	let mut need_commitment = false;
	if let &mut Some((_, ref mut update_state)) = &mut self.pending_update_fee {
		if *update_state == FeeUpdateState::RemoteAnnounced {
			*update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
			need_commitment = true;

	// Promote inbound HTLCs the peer just committed to.
	for htlc in self.pending_inbound_htlcs.iter_mut() {
		let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
			Some(forward_info.clone())
		if let Some(forward_info) = new_forward {
			log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
				log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id));
			htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
			need_commitment = true;

	let mut claimed_htlcs = Vec::new();
	for htlc in self.pending_outbound_htlcs.iter_mut() {
		if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
			log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
				log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id));
			// Grab the preimage, if it exists, instead of cloning
			let mut reason = OutboundHTLCOutcome::Success(None);
			mem::swap(outcome, &mut reason);
			if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
				// If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
				// upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
				// have a `Success(None)` reason. In this case we could forget some HTLC
				// claims, but such an upgrade is unlikely and including claimed HTLCs here
				// fixes a bug which the user was exposed to on 0.0.104 when they started the
				claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
			htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
			need_commitment = true;

	self.latest_monitor_update_id += 1;
	let mut monitor_update = ChannelMonitorUpdate {
		update_id: self.latest_monitor_update_id,
		updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
			commitment_tx: holder_commitment_tx,
			htlc_outputs: htlcs_and_sigs,
			nondust_htlc_sources,

	self.cur_holder_commitment_transaction_number -= 1;
	// Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
	// build_commitment_no_status_check() next which will reset this to RAAFirst.
	self.resend_order = RAACommitmentOrder::CommitmentFirst;

	if (self.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
		// In case we initially failed monitor updating without requiring a response, we need
		// to make sure the RAA gets sent first.
		self.monitor_pending_revoke_and_ack = true;
		if need_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
			// If we were going to send a commitment_signed after the RAA, go ahead and do all
			// the corresponding HTLC status updates so that get_last_commitment_update
			// includes the right HTLCs.
			self.monitor_pending_commitment_signed = true;
			let mut additional_update = self.build_commitment_no_status_check(logger);
			// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
			// strictly increasing by one, so decrement it here.
			self.latest_monitor_update_id = monitor_update.update_id;
			monitor_update.updates.append(&mut additional_update.updates);

		log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
			log_bytes!(self.channel_id));
		return Ok(self.push_ret_blockable_mon_update(monitor_update));

	let need_commitment_signed = if need_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
		// If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
		// we'll send one right away when we get the revoke_and_ack when we
		// free_holding_cell_htlcs().
		let mut additional_update = self.build_commitment_no_status_check(logger);
		// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
		// strictly increasing by one, so decrement it here.
		self.latest_monitor_update_id = monitor_update.update_id;
		monitor_update.updates.append(&mut additional_update.updates);

	log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
		log_bytes!(self.channel_id()), if need_commitment_signed { " our own commitment_signed and" } else { "" });
	self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
	return Ok(self.push_ret_blockable_mon_update(monitor_update));
3490 /// Public version of the below, checking relevant preconditions first.
3491 /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
3492 /// returns `(None, Vec::new())`.
3493 pub fn maybe_free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
3494 if self.channel_state >= ChannelState::ChannelReady as u32 &&
3495 (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
3496 self.free_holding_cell_htlcs(logger)
3497 } else { (None, Vec::new()) }
/// Frees any pending commitment updates in the holding cell, generating the relevant messages
/// for our counterparty.
/// Returns the monitor update covering the freed updates (if any were freed)
/// plus the HTLCs which could not be forwarded and must be failed backwards.
fn free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
	// Callers must never free the holding cell while a monitor update is in flight.
	assert_eq!(self.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
	if self.holding_cell_htlc_updates.len() != 0 || self.holding_cell_update_fee.is_some() {
		log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.holding_cell_htlc_updates.len(),
			if self.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, log_bytes!(self.channel_id()));

		let mut monitor_update = ChannelMonitorUpdate {
			update_id: self.latest_monitor_update_id + 1, // We don't increment this yet!
			updates: Vec::new(),

		// Take ownership of the queued updates so we can drain them below.
		let mut htlc_updates = Vec::new();
		mem::swap(&mut htlc_updates, &mut self.holding_cell_htlc_updates);
		let mut update_add_htlcs = Vec::with_capacity(htlc_updates.len());
		let mut update_fulfill_htlcs = Vec::with_capacity(htlc_updates.len());
		let mut update_fail_htlcs = Vec::with_capacity(htlc_updates.len());
		let mut htlcs_to_fail = Vec::new();
		for htlc_update in htlc_updates.drain(..) {
			// Note that this *can* fail, though it should be due to rather-rare conditions on
			// fee races with adding too many outputs which push our total payments just over
			// the limit. In case it's less rare than I anticipate, we may want to revisit
			// handling this case better and maybe fulfilling some of the HTLCs while attempting
			// to rebalance channels.
			match &htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC {amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet, ..} => {
					match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(), false, logger) {
						Ok(update_add_msg_option) => update_add_htlcs.push(update_add_msg_option.unwrap()),
							ChannelError::Ignore(ref msg) => {
								log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}",
									log_bytes!(payment_hash.0), msg, log_bytes!(self.channel_id()));
								// If we fail to send here, then this HTLC should
								// be failed backwards. Failing to send here
								// indicates that this HTLC may keep being put back
								// into the holding cell without ever being
								// successfully forwarded/failed/fulfilled, causing
								// our counterparty to eventually close on us.
								htlcs_to_fail.push((source.clone(), *payment_hash));
								panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
				&HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
					// If an HTLC claim was previously added to the holding cell (via
					// `get_update_fulfill_htlc`, then generating the claim message itself must
					// not fail - any in between attempts to claim the HTLC will have resulted
					// in it hitting the holding cell again and we cannot change the state of a
					// holding cell HTLC from fulfill to anything else.
					let (update_fulfill_msg_option, mut additional_monitor_update) =
						if let UpdateFulfillFetch::NewClaim { msg, monitor_update, .. } = self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) {
							(msg, monitor_update)
						} else { unreachable!() };
					update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap());
					monitor_update.updates.append(&mut additional_monitor_update.updates);
				&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
					match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
						Ok(update_fail_msg_option) => {
							// If an HTLC failure was previously added to the holding cell (via
							// `queue_fail_htlc`) then generating the fail message itself must
							// not fail - we should never end up in a state where we double-fail
							// an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
							// for a full revocation before failing.
							update_fail_htlcs.push(update_fail_msg_option.unwrap())
							if let ChannelError::Ignore(_) = e {}
								panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");

		// Nothing was actually freed (all adds failed and there is no fee update).
		if update_add_htlcs.is_empty() && update_fulfill_htlcs.is_empty() && update_fail_htlcs.is_empty() && self.holding_cell_update_fee.is_none() {
			return (None, htlcs_to_fail);
		let update_fee = if let Some(feerate) = self.holding_cell_update_fee.take() {
			self.send_update_fee(feerate, false, logger)

		let mut additional_update = self.build_commitment_no_status_check(logger);
		// build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
		// but we want them to be strictly increasing by one, so reset it here.
		self.latest_monitor_update_id = monitor_update.update_id;
		monitor_update.updates.append(&mut additional_update.updates);

		log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
			log_bytes!(self.channel_id()), if update_fee.is_some() { "a fee update, " } else { "" },
			update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len());

		self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
		(self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
/// Handles receiving a remote's revoke_and_ack. Note that we may return a new
/// commitment_signed message here in case we had pending outbound HTLCs to add which were
/// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
/// generating an appropriate error *after* the channel state has been updated based on the
/// revoke_and_ack message.
pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<&ChannelMonitorUpdate>), ChannelError>
	where L::Target: Logger,
	// The channel must be operational, the peer must not owe us a reestablish,
	// and they may not revoke once closing_signed exchange has begun.
	if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
		return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
	if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
		return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
	if self.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.last_sent_closing_fee.is_some() {
		return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));

	let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());

	// The revealed secret must derive the commitment point the peer gave us earlier.
	if let Some(counterparty_prev_commitment_point) = self.counterparty_prev_commitment_point {
		if PublicKey::from_secret_key(&self.secp_ctx, &secret) != counterparty_prev_commitment_point {
			return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));

	if self.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
		// Our counterparty seems to have burned their coins to us (by revoking a state when we
		// haven't given them a new commitment transaction to broadcast). We should probably
		// take advantage of this by updating our channel monitor, sending them an error, and
		// waiting for them to broadcast their latest (now-revoked claim). But, that would be a
		// lot of work, and there's some chance this is all a misunderstanding anyway.
		// We have to do *something*, though, since our signer may get mad at us for otherwise
		// jumping a remote commitment number, so best to just force-close and move on.
		return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));

	// Test/fuzzing-only: drop cached fee projections which are now stale.
	#[cfg(any(test, fuzzing))]
	*self.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
	*self.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;

	// Let the signer record/validate the revocation before we mutate any state.
	self.holder_signer.validate_counterparty_revocation(
		self.cur_counterparty_commitment_transaction_number + 1,
	).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;

	self.commitment_secrets.provide_secret(self.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
		.map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
	self.latest_monitor_update_id += 1;
	let mut monitor_update = ChannelMonitorUpdate {
		update_id: self.latest_monitor_update_id,
		updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
			idx: self.cur_counterparty_commitment_transaction_number + 1,
			secret: msg.per_commitment_secret,

	// Update state now that we've passed all the can-fail calls...
	// (note that we may still fail to generate the new commitment_signed message, but that's
	// OK, we step the channel here and *then* if the new generation fails we can fail the
	// channel based on that, but stepping stuff here should be safe either way.
	self.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
	self.sent_message_awaiting_response = None;
	self.counterparty_prev_commitment_point = self.counterparty_cur_commitment_point;
	self.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
	self.cur_counterparty_commitment_transaction_number -= 1;

	if self.announcement_sigs_state == AnnouncementSigsState::Committed {
		self.announcement_sigs_state = AnnouncementSigsState::PeerReceived;

	log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", log_bytes!(self.channel_id()));
	let mut to_forward_infos = Vec::new();
	let mut revoked_htlcs = Vec::new();
	let mut finalized_claimed_htlcs = Vec::new();
	let mut update_fail_htlcs = Vec::new();
	let mut update_fail_malformed_htlcs = Vec::new();
	let mut require_commitment = false;
	let mut value_to_self_msat_diff: i64 = 0;

	// Take references explicitly so that we can hold multiple references to self.
	let pending_inbound_htlcs: &mut Vec<_> = &mut self.pending_inbound_htlcs;
	let pending_outbound_htlcs: &mut Vec<_> = &mut self.pending_outbound_htlcs;

	// We really shouldnt have two passes here, but retain gives a non-mutable ref (Rust bug)
	pending_inbound_htlcs.retain(|htlc| {
		if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
			log_trace!(logger, " ...removing inbound LocalRemoved {}", log_bytes!(htlc.payment_hash.0));
			if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
				// We claimed this inbound HTLC, so its value moves to our side.
				value_to_self_msat_diff += htlc.amount_msat as i64;
	pending_outbound_htlcs.retain(|htlc| {
		if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
			log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", log_bytes!(htlc.payment_hash.0));
			if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
				revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
				finalized_claimed_htlcs.push(htlc.source.clone());
				// They fulfilled, so we sent them money
				value_to_self_msat_diff -= htlc.amount_msat as i64;
	// Step remaining inbound HTLCs forward one state on this revocation.
	for htlc in pending_inbound_htlcs.iter_mut() {
		let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
		} else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
			let mut state = InboundHTLCState::Committed;
			mem::swap(&mut state, &mut htlc.state);

			if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
				log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
				htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
				require_commitment = true;
			} else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
				match forward_info {
					PendingHTLCStatus::Fail(fail_msg) => {
						log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", log_bytes!(htlc.payment_hash.0));
						require_commitment = true;
							HTLCFailureMsg::Relay(msg) => {
								htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
								update_fail_htlcs.push(msg)
							HTLCFailureMsg::Malformed(msg) => {
								htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
								update_fail_malformed_htlcs.push(msg)
					PendingHTLCStatus::Forward(forward_info) => {
						log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", log_bytes!(htlc.payment_hash.0));
						to_forward_infos.push((forward_info, htlc.htlc_id));
						htlc.state = InboundHTLCState::Committed;
	// Step outbound HTLCs forward one state as well.
	for htlc in pending_outbound_htlcs.iter_mut() {
		if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
			log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", log_bytes!(htlc.payment_hash.0));
			htlc.state = OutboundHTLCState::Committed;
		if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
			log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
			// Grab the preimage, if it exists, instead of cloning
			let mut reason = OutboundHTLCOutcome::Success(None);
			mem::swap(outcome, &mut reason);
			htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
			require_commitment = true;

	self.value_to_self_msat = (self.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;

	if let Some((feerate, update_state)) = self.pending_update_fee {
		match update_state {
			FeeUpdateState::Outbound => {
				debug_assert!(self.is_outbound());
				log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
				self.feerate_per_kw = feerate;
				self.pending_update_fee = None;
			FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); },
			FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
				debug_assert!(!self.is_outbound());
				log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
				require_commitment = true;
				self.feerate_per_kw = feerate;
				self.pending_update_fee = None;

	if (self.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 {
		// We can't actually generate a new commitment transaction (incl by freeing holding
		// cells) while we can't update the monitor, so we just return what we have.
		if require_commitment {
			self.monitor_pending_commitment_signed = true;
			// When the monitor updating is restored we'll call get_last_commitment_update(),
			// which does not update state, but we're definitely now awaiting a remote revoke
			// before we can step forward any more, so set it here.
			let mut additional_update = self.build_commitment_no_status_check(logger);
			// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
			// strictly increasing by one, so decrement it here.
			self.latest_monitor_update_id = monitor_update.update_id;
			monitor_update.updates.append(&mut additional_update.updates);

		self.monitor_pending_forwards.append(&mut to_forward_infos);
		self.monitor_pending_failures.append(&mut revoked_htlcs);
		self.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
		log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.channel_id()));
		return Ok((Vec::new(), self.push_ret_blockable_mon_update(monitor_update)));

	match self.free_holding_cell_htlcs(logger) {
		(Some(_), htlcs_to_fail) => {
			let mut additional_update = self.pending_monitor_updates.pop().unwrap().update;
			// free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
			// strictly increasing by one, so decrement it here.
			self.latest_monitor_update_id = monitor_update.update_id;
			monitor_update.updates.append(&mut additional_update.updates);

			self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
			Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
		(None, htlcs_to_fail) => {
			if require_commitment {
				let mut additional_update = self.build_commitment_no_status_check(logger);

				// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
				// strictly increasing by one, so decrement it here.
				self.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);

				log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.",
					log_bytes!(self.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len());
				self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
				Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
				log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.channel_id()));
				self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
				Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
3847 /// Queues up an outbound update fee by placing it in the holding cell. You should call
3848 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
3849 /// commitment update.
3850 pub fn queue_update_fee<L: Deref>(&mut self, feerate_per_kw: u32, logger: &L) where L::Target: Logger {
// Passing `force_holding_cell = true` guarantees `send_update_fee` stashes the update in the
// holding cell rather than returning an `UpdateFee` message, hence the assert below.
3851 let msg_opt = self.send_update_fee(feerate_per_kw, true, logger);
3852 assert!(msg_opt.is_none(), "We forced holding cell?");
3855 /// Adds a pending update to this channel. See the doc for send_htlc for
3856 /// further details on the optionness of the return value.
3857 /// If our balance is too low to cover the cost of the next commitment transaction at the
3858 /// new feerate, the update is cancelled.
3860 /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
3861 /// [`Channel`] if `force_holding_cell` is false.
// Returns `Some(UpdateFee)` to send to the counterparty, or `None` when the update was either
// cancelled (unaffordable / over dust exposure) or placed in the holding cell.
3862 fn send_update_fee<L: Deref>(&mut self, feerate_per_kw: u32, mut force_holding_cell: bool, logger: &L) -> Option<msgs::UpdateFee> where L::Target: Logger {
// Only the channel funder (outbound side) may send update_fee; these states are caller bugs,
// not remote misbehavior, so we panic rather than error.
3863 if !self.is_outbound() {
3864 panic!("Cannot send fee from inbound channel");
3866 if !self.is_usable() {
3867 panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
3869 if !self.is_live() {
3870 panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
3873 // Before proposing a feerate update, check that we can actually afford the new fee.
3874 let inbound_stats = self.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
3875 let outbound_stats = self.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
3876 let keys = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number);
3877 let commitment_stats = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, true, logger);
// Budget for the commitment fee at the new rate, counting holding-cell HTLCs plus a buffer of
// CONCURRENT_INBOUND_HTLC_FEE_BUFFER additional HTLCs, converted sat -> msat (* 1000).
3878 let buffer_fee_msat = Channel::<Signer>::commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.opt_anchors()) * 1000;
3879 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
// We must retain the counterparty-selected reserve on top of the fee buffer; otherwise cancel.
3880 if holder_balance_msat < buffer_fee_msat + self.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
3881 //TODO: auto-close after a number of failures?
3882 log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
// (early-return of None follows on a line not shown here)
3886 // Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
3887 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
3888 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
3889 if holder_tx_dust_exposure > self.get_max_dust_htlc_exposure_msat() {
3890 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
3893 if counterparty_tx_dust_exposure > self.get_max_dust_htlc_exposure_msat() {
3894 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
// While awaiting the remote's revoke_and_ack or a monitor update, we may not send further
// commitment-affecting updates, so force the fee update into the holding cell.
3898 if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
3899 force_holding_cell = true;
3902 if force_holding_cell {
3903 self.holding_cell_update_fee = Some(feerate_per_kw);
3907 debug_assert!(self.pending_update_fee.is_none());
3908 self.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
3910 Some(msgs::UpdateFee {
3911 channel_id: self.channel_id,
3916 /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
3917 /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
3919 /// No further message handling calls may be made until a channel_reestablish dance has
3921 pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) where L::Target: Logger {
3922 assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0);
// A disconnect before the funding flow reaches FundingSent simply kills the channel.
3923 if self.channel_state < ChannelState::FundingSent as u32 {
3924 self.channel_state = ChannelState::ShutdownComplete as u32;
3928 if self.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
3929 // While the below code should be idempotent, it's simpler to just return early, as
3930 // redundant disconnect events can fire, though they should be rare.
// Announcement signatures that were sent but not yet committed must be re-sent on reconnect.
3934 if self.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.announcement_sigs_state == AnnouncementSigsState::Committed {
3935 self.announcement_sigs_state = AnnouncementSigsState::NotSent;
3938 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
3939 // will be retransmitted.
3940 self.last_sent_closing_fee = None;
3941 self.pending_counterparty_closing_signed = None;
3942 self.closing_fee_limits = None;
// Drop inbound HTLCs the remote announced but never committed (they will re-send them),
// keeping everything that has progressed past RemoteAnnounced.
3944 let mut inbound_drop_count = 0;
3945 self.pending_inbound_htlcs.retain(|htlc| {
3947 InboundHTLCState::RemoteAnnounced(_) => {
3948 // They sent us an update_add_htlc but we never got the commitment_signed.
3949 // We'll tell them what commitment_signed we're expecting next and they'll drop
3950 // this HTLC accordingly
3951 inbound_drop_count += 1;
3954 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
3955 // We received a commitment_signed updating this HTLC and (at least hopefully)
3956 // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
3957 // in response to it yet, so don't touch it.
3960 InboundHTLCState::Committed => true,
3961 InboundHTLCState::LocalRemoved(_) => {
3962 // We (hopefully) sent a commitment_signed updating this HTLC (which we can
3963 // re-transmit if needed) and they may have even sent a revoke_and_ack back
3964 // (that we missed). Keep this around for now and if they tell us they missed
3965 // the commitment_signed we can re-transmit the update then.
// Rewind the counterparty HTLC id counter to account for the HTLCs we just dropped.
3970 self.next_counterparty_htlc_id -= inbound_drop_count;
// An uncommitted inbound fee update is likewise forgotten; the remote will re-send it.
3972 if let Some((_, update_state)) = self.pending_update_fee {
3973 if update_state == FeeUpdateState::RemoteAnnounced {
3974 debug_assert!(!self.is_outbound());
3975 self.pending_update_fee = None;
3979 for htlc in self.pending_outbound_htlcs.iter_mut() {
3980 if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
3981 // They sent us an update to remove this but haven't yet sent the corresponding
3982 // commitment_signed, we need to move it back to Committed and they can re-send
3983 // the update upon reconnection.
3984 htlc.state = OutboundHTLCState::Committed;
3988 self.sent_message_awaiting_response = None;
// Finally mark the channel paused; message handlers will error until channel_reestablish.
3990 self.channel_state |= ChannelState::PeerDisconnected as u32;
3991 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.channel_id()));
3994 /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
3995 /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
3996 /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
3997 /// update completes (potentially immediately).
3998 /// The messages which were generated with the monitor update must *not* have been sent to the
3999 /// remote end, and must instead have been dropped. They will be regenerated when
4000 /// [`Self::monitor_updating_restored`] is called.
4002 /// [`ChannelManager`]: super::channelmanager::ChannelManager
4003 /// [`chain::Watch`]: crate::chain::Watch
4004 /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
4005 fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
4006 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
4007 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
4008 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
// Accumulate (|=, append) rather than overwrite: several updates may be paused before the
// monitor persistence completes, and none of the queued work may be lost.
4010 self.monitor_pending_revoke_and_ack |= resend_raa;
4011 self.monitor_pending_commitment_signed |= resend_commitment;
4012 self.monitor_pending_channel_ready |= resend_channel_ready;
4013 self.monitor_pending_forwards.append(&mut pending_forwards);
4014 self.monitor_pending_failures.append(&mut pending_fails);
4015 self.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
4016 self.channel_state |= ChannelState::MonitorUpdateInProgress as u32;
4019 /// Indicates that the latest ChannelMonitor update has been committed by the client
4020 /// successfully and we should restore normal operation. Returns messages which should be sent
4021 /// to the remote side.
4022 pub fn monitor_updating_restored<L: Deref, NS: Deref>(
4023 &mut self, logger: &L, node_signer: &NS, genesis_block_hash: BlockHash,
4024 user_config: &UserConfig, best_block_height: u32
4025 ) -> MonitorRestoreUpdates
4028 NS::Target: NodeSigner
// Must only be called while a monitor update is actually in flight; clear the flag.
4030 assert_eq!(self.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
4031 self.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
// Sanity-check the pending-update queue invariant: once a blocked update appears, every
// later update must also be blocked.
4032 let mut found_blocked = false;
4033 self.pending_monitor_updates.retain(|upd| {
4034 if found_blocked { debug_assert!(upd.blocked, "No mons may be unblocked after a blocked one"); }
4035 if upd.blocked { found_blocked = true; }
4039 // If we're past (or at) the FundingSent stage on an outbound channel, try to
4040 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
4041 // first received the funding_signed.
4042 let mut funding_broadcastable =
4043 if self.is_outbound() && self.channel_state & !MULTI_STATE_FLAGS >= ChannelState::FundingSent as u32 {
4044 self.funding_transaction.take()
4046 // That said, if the funding transaction is already confirmed (ie we're active with a
4047 // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
4048 if self.channel_state & !MULTI_STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.minimum_depth != Some(0) {
4049 funding_broadcastable = None;
4052 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
4053 // (and we assume the user never directly broadcasts the funding transaction and waits for
4054 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
4055 // * an inbound channel that failed to persist the monitor on funding_created and we got
4056 // the funding transaction confirmed before the monitor was persisted, or
4057 // * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
4058 let channel_ready = if self.monitor_pending_channel_ready {
4059 assert!(!self.is_outbound() || self.minimum_depth == Some(0),
4060 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
4061 self.monitor_pending_channel_ready = false;
4062 let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
4063 Some(msgs::ChannelReady {
4064 channel_id: self.channel_id(),
4065 next_per_commitment_point,
4066 short_channel_id_alias: Some(self.outbound_scid_alias),
4070 let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block_height, logger);
// Drain the queued HTLC work accumulated while the monitor update was pending.
4072 let mut accepted_htlcs = Vec::new();
4073 mem::swap(&mut accepted_htlcs, &mut self.monitor_pending_forwards);
4074 let mut failed_htlcs = Vec::new();
4075 mem::swap(&mut failed_htlcs, &mut self.monitor_pending_failures);
4076 let mut finalized_claimed_htlcs = Vec::new();
4077 mem::swap(&mut finalized_claimed_htlcs, &mut self.monitor_pending_finalized_fulfills);
// If the peer is disconnected we must not generate RAA/commitment messages here; the
// channel_reestablish dance will regenerate whatever is needed.
4079 if self.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
4080 self.monitor_pending_revoke_and_ack = false;
4081 self.monitor_pending_commitment_signed = false;
4082 return MonitorRestoreUpdates {
4083 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
4084 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4088 let raa = if self.monitor_pending_revoke_and_ack {
4089 Some(self.get_last_revoke_and_ack())
4091 let commitment_update = if self.monitor_pending_commitment_signed {
4092 self.mark_awaiting_response();
4093 Some(self.get_last_commitment_update(logger))
4096 self.monitor_pending_revoke_and_ack = false;
4097 self.monitor_pending_commitment_signed = false;
// `resend_order` records which of RAA/commitment the peer must receive first.
4098 let order = self.resend_order.clone();
4099 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
4100 log_bytes!(self.channel_id()), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
4101 if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
4102 match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
4103 MonitorRestoreUpdates {
4104 raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
// Handles an inbound BOLT-2 `update_fee` message from the counterparty (the channel funder).
// Errors with `ChannelError::Close` on protocol violations or excessive dust exposure.
4108 pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
4109 where F::Target: FeeEstimator, L::Target: Logger
// Only the funder may send update_fee; if we are outbound, the remote is misbehaving.
4111 if self.is_outbound() {
4112 return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
4114 if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4115 return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
4117 Channel::<Signer>::check_remote_fee(fee_estimator, msg.feerate_per_kw, Some(self.feerate_per_kw), logger)?;
// Evaluate the dust-buffer comparison *before* recording the pending update, since
// `get_dust_buffer_feerate` takes `pending_update_fee` into account.
4118 let feerate_over_dust_buffer = msg.feerate_per_kw > self.get_dust_buffer_feerate(None);
4120 self.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
4121 self.update_time_counter += 1;
4122 // If the feerate has increased over the previous dust buffer (note that
4123 // `get_dust_buffer_feerate` considers the `pending_update_fee` status), check that we
4124 // won't be pushed over our dust exposure limit by the feerate increase.
4125 if feerate_over_dust_buffer {
4126 let inbound_stats = self.get_inbound_pending_htlc_stats(None);
4127 let outbound_stats = self.get_outbound_pending_htlc_stats(None);
4128 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4129 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4130 if holder_tx_dust_exposure > self.get_max_dust_htlc_exposure_msat() {
4131 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
4132 msg.feerate_per_kw, holder_tx_dust_exposure)));
4134 if counterparty_tx_dust_exposure > self.get_max_dust_htlc_exposure_msat() {
4135 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
4136 msg.feerate_per_kw, counterparty_tx_dust_exposure)));
// Rebuilds the last `revoke_and_ack` we sent (or should have sent), for retransmission after
// a reconnect or once a pending monitor update completes.
4142 fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
4143 let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
// `+ 2`: commitment numbers count down, so the secret being revealed is two past the current
// holder commitment number (the previous, now-revoked commitment).
4144 let per_commitment_secret = self.holder_signer.release_commitment_secret(self.cur_holder_commitment_transaction_number + 2);
4145 msgs::RevokeAndACK {
4146 channel_id: self.channel_id,
4147 per_commitment_secret,
4148 next_per_commitment_point,
4150 next_local_nonce: None,
// Regenerates the last `CommitmentUpdate` (all uncommitted update_* messages plus a fresh
// commitment_signed) for retransmission, without advancing any channel state.
4154 fn get_last_commitment_update<L: Deref>(&self, logger: &L) -> msgs::CommitmentUpdate where L::Target: Logger {
4155 let mut update_add_htlcs = Vec::new();
4156 let mut update_fulfill_htlcs = Vec::new();
4157 let mut update_fail_htlcs = Vec::new();
4158 let mut update_fail_malformed_htlcs = Vec::new();
// Outbound HTLCs still in LocalAnnounced were sent but never committed by the remote;
// re-send their update_add_htlc messages.
4160 for htlc in self.pending_outbound_htlcs.iter() {
4161 if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
4162 update_add_htlcs.push(msgs::UpdateAddHTLC {
4163 channel_id: self.channel_id(),
4164 htlc_id: htlc.htlc_id,
4165 amount_msat: htlc.amount_msat,
4166 payment_hash: htlc.payment_hash,
4167 cltv_expiry: htlc.cltv_expiry,
4168 onion_routing_packet: (**onion_packet).clone(),
// Inbound HTLCs in LocalRemoved need their removal message (fail / fail-malformed /
// fulfill, depending on the recorded removal reason) re-sent.
4173 for htlc in self.pending_inbound_htlcs.iter() {
4174 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
4176 &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
4177 update_fail_htlcs.push(msgs::UpdateFailHTLC {
4178 channel_id: self.channel_id(),
4179 htlc_id: htlc.htlc_id,
4180 reason: err_packet.clone()
4183 &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
4184 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
4185 channel_id: self.channel_id(),
4186 htlc_id: htlc.htlc_id,
4187 sha256_of_onion: sha256_of_onion.clone(),
4188 failure_code: failure_code.clone(),
4191 &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
4192 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
4193 channel_id: self.channel_id(),
4194 htlc_id: htlc.htlc_id,
4195 payment_preimage: payment_preimage.clone(),
// Only the funder (outbound side) ever sends update_fee; include a pending one if present.
4202 let update_fee = if self.is_outbound() && self.pending_update_fee.is_some() {
4203 Some(msgs::UpdateFee {
4204 channel_id: self.channel_id(),
4205 feerate_per_kw: self.pending_update_fee.unwrap().0,
4209 log_trace!(logger, "Regenerated latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
4210 log_bytes!(self.channel_id()), if update_fee.is_some() { " update_fee," } else { "" },
4211 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
4212 msgs::CommitmentUpdate {
4213 update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
// This re-signs the same commitment we previously sent; failure here indicates a signer bug.
4214 commitment_signed: self.send_commitment_no_state_update(logger).expect("It looks like we failed to re-generate a commitment_signed we had previously sent?").0,
4218 /// May panic if some calls other than message-handling calls (which will all Err immediately)
4219 /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
4221 /// Some links printed in log lines are included here to check them during build (when run with
4222 /// `cargo doc --document-private-items`):
4223 /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
4224 /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
// Handles a BOLT-2 `channel_reestablish`: validates the peer's claimed commitment numbers,
// and works out which of channel_ready / revoke_and_ack / commitment_signed must be re-sent.
4225 pub fn channel_reestablish<L: Deref, NS: Deref>(
4226 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
4227 genesis_block_hash: BlockHash, user_config: &UserConfig, best_block: &BestBlock
4228 ) -> Result<ReestablishResponses, ChannelError>
4231 NS::Target: NodeSigner
4233 if self.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
4234 // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
4235 // almost certainly indicates we are going to end up out-of-sync in some way, so we
4236 // just close here instead of trying to recover.
4237 return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
// Commitment numbers are transmitted as counts from zero; values at/above
// INITIAL_COMMITMENT_NUMBER (or a zero next_local) are nonsensical.
4240 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
4241 msg.next_local_commitment_number == 0 {
4242 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish (usually an lnd node with lost state asking us to force-close for them)".to_owned()));
// Verify the peer's claimed last per-commitment secret actually matches the point we
// generated at that height, proving they really saw our revocation.
4245 if msg.next_remote_commitment_number > 0 {
4246 let expected_point = self.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.secp_ctx);
4247 let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
4248 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
4249 if expected_point != PublicKey::from_secret_key(&self.secp_ctx, &given_secret) {
4250 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
// If the peer proves they are *ahead* of us, we have lost state: broadcasting our stale
// commitment would let them claim all our funds, so we deliberately panic instead.
4252 if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number {
4253 macro_rules! log_and_panic {
4254 ($err_msg: expr) => {
4255 log_error!(logger, $err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id));
4256 panic!($err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id));
4259 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
4260 This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
4261 More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
4262 If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
4263 ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
4264 ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
4265 Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
4266 See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
4270 // Before we change the state of the channel, we check if the peer is sending a very old
4271 // commitment transaction number, if yes we send a warning message.
4272 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number - 1;
4273 if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
4275 ChannelError::Warn(format!("Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)", msg.next_remote_commitment_number, our_commitment_transaction))
4279 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
4280 // remaining cases either succeed or ErrorMessage-fail).
4281 self.channel_state &= !(ChannelState::PeerDisconnected as u32);
4282 self.sent_message_awaiting_response = None;
// If we had sent shutdown, BOLT 2 requires we retransmit it on reestablish.
4284 let shutdown_msg = if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
4285 assert!(self.shutdown_scriptpubkey.is_some());
4286 Some(msgs::Shutdown {
4287 channel_id: self.channel_id,
4288 scriptpubkey: self.get_closing_scriptpubkey(),
4292 let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger);
// Special-case channels still at FundingSent: the only thing we could possibly resend is
// our channel_ready.
4294 if self.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
4295 // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
4296 if self.channel_state & (ChannelState::OurChannelReady as u32) == 0 ||
4297 self.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4298 if msg.next_remote_commitment_number != 0 {
4299 return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
4301 // Short circuit the whole handler as there is nothing we can resend them
4302 return Ok(ReestablishResponses {
4303 channel_ready: None,
4304 raa: None, commitment_update: None,
4305 order: RAACommitmentOrder::CommitmentFirst,
4306 shutdown_msg, announcement_sigs,
4310 // We have OurChannelReady set!
4311 let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
4312 return Ok(ReestablishResponses {
4313 channel_ready: Some(msgs::ChannelReady {
4314 channel_id: self.channel_id(),
4315 next_per_commitment_point,
4316 short_channel_id_alias: Some(self.outbound_scid_alias),
4318 raa: None, commitment_update: None,
4319 order: RAACommitmentOrder::CommitmentFirst,
4320 shutdown_msg, announcement_sigs,
// Decide whether the peer missed our last revoke_and_ack: compare their claimed
// next_remote_commitment_number against our current holder commitment number.
4324 let required_revoke = if msg.next_remote_commitment_number + 1 == INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number {
4325 // Remote isn't waiting on any RevokeAndACK from us!
4326 // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
4328 } else if msg.next_remote_commitment_number + 1 == (INITIAL_COMMITMENT_NUMBER - 1) - self.cur_holder_commitment_transaction_number {
4329 if self.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4330 self.monitor_pending_revoke_and_ack = true;
4333 Some(self.get_last_revoke_and_ack())
4336 return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old local commitment transaction".to_owned()));
4339 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
4340 // revoke_and_ack, not on sending commitment_signed, so we add one if have
4341 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
4342 // the corresponding revoke_and_ack back yet.
4343 let is_awaiting_remote_revoke = self.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0;
4344 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
4345 self.mark_awaiting_response();
4347 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
// Resend channel_ready iff both sides agree we are exactly at the first commitment.
4349 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number == 1 {
4350 // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
4351 let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
4352 Some(msgs::ChannelReady {
4353 channel_id: self.channel_id(),
4354 next_per_commitment_point,
4355 short_channel_id_alias: Some(self.outbound_scid_alias),
// Peer is fully up to date on our commitment txs: at most the RAA was lost.
4359 if msg.next_local_commitment_number == next_counterparty_commitment_number {
4360 if required_revoke.is_some() {
4361 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", log_bytes!(self.channel_id()));
4363 log_debug!(logger, "Reconnected channel {} with no loss", log_bytes!(self.channel_id()));
4366 Ok(ReestablishResponses {
4367 channel_ready, shutdown_msg, announcement_sigs,
4368 raa: required_revoke,
4369 commitment_update: None,
4370 order: self.resend_order.clone(),
// Peer is one commitment behind: they missed our last commitment_signed, so retransmit it
// (or queue it if a monitor update is still in flight).
4372 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
4373 if required_revoke.is_some() {
4374 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", log_bytes!(self.channel_id()));
4376 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", log_bytes!(self.channel_id()));
4379 if self.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4380 self.monitor_pending_commitment_signed = true;
4381 Ok(ReestablishResponses {
4382 channel_ready, shutdown_msg, announcement_sigs,
4383 commitment_update: None, raa: None,
4384 order: self.resend_order.clone(),
4387 Ok(ReestablishResponses {
4388 channel_ready, shutdown_msg, announcement_sigs,
4389 raa: required_revoke,
4390 commitment_update: Some(self.get_last_commitment_update(logger)),
4391 order: self.resend_order.clone(),
4395 Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction".to_owned()))
4399 /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
4400 /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
4401 /// at which point they will be recalculated.
4402 fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
4404 where F::Target: FeeEstimator
// Memoized: once computed, the (min, max) pair is cached in `closing_fee_limits` (it is
// cleared on disconnect in remove_uncommitted_htlcs_and_mark_paused).
4406 if let Some((min, max)) = self.closing_fee_limits { return (min, max); }
4408 // Propose a range from our current Background feerate to our Normal feerate plus our
4409 // force_close_avoidance_max_fee_satoshis.
4410 // If we fail to come to consensus, we'll have to force-close.
4411 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background);
4412 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
// As the non-funder we pay no closing fee, so we accept any max the funder proposes.
4413 let mut proposed_max_feerate = if self.is_outbound() { normal_feerate } else { u32::max_value() };
4415 // The spec requires that (when the channel does not have anchors) we only send absolute
4416 // channel fees no greater than the absolute channel fee on the current commitment
4417 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
4418 // very good reason to apply such a limit in any case. We don't bother doing so, risking
4419 // some force-closure by old nodes, but we wanted to close the channel anyway.
4421 if let Some(target_feerate) = self.target_closing_feerate_sats_per_kw {
4422 let min_feerate = if self.is_outbound() { target_feerate } else { cmp::min(self.feerate_per_kw, target_feerate) };
4423 proposed_feerate = cmp::max(proposed_feerate, min_feerate);
4424 proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
4427 // Note that technically we could end up with a lower minimum fee if one sides' balance is
4428 // below our dust limit, causing the output to disappear. We don't bother handling this
4429 // case, however, as this should only happen if a channel is closed before any (material)
4430 // payments have been made on it. This may cause slight fee overpayment and/or failure to
4431 // come to consensus with our counterparty on appropriate fees, however it should be a
4432 // relatively rare case. We can revisit this later, though note that in order to determine
4433 // if the funders' output is dust we have to know the absolute fee we're going to use.
4434 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
4435 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
4436 let proposed_max_total_fee_satoshis = if self.is_outbound() {
4437 // We always add force_close_avoidance_max_fee_satoshis to our normal
4438 // feerate-calculated fee, but allow the max to be overridden if we're using a
4439 // target feerate-calculated fee.
4440 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.config.options.force_close_avoidance_max_fee_satoshis,
4441 proposed_max_feerate as u64 * tx_weight / 1000)
// Non-funder: cap at everything beyond our own balance (rounded up to whole sats), since
// the funder pays the fee from their output.
4443 self.channel_value_satoshis - (self.value_to_self_msat + 999) / 1000
4446 self.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
4447 self.closing_fee_limits.clone().unwrap()
4450 /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
4451 /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
4452 /// this point if we're the funder we should send the initial closing_signed, and in any case
4453 /// shutdown should complete within a reasonable timeframe.
4454 fn closing_negotiation_ready(&self) -> bool {
4455 self.pending_inbound_htlcs.is_empty() && self.pending_outbound_htlcs.is_empty() &&
// Of the masked state bits, exactly the two shutdown-sent bits must be set; any of
// AwaitingRemoteRevoke / PeerDisconnected / MonitorUpdateInProgress blocks negotiation.
4456 self.channel_state &
4457 (BOTH_SIDES_SHUTDOWN_MASK | ChannelState::AwaitingRemoteRevoke as u32 |
4458 ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)
4459 == BOTH_SIDES_SHUTDOWN_MASK &&
4460 self.pending_update_fee.is_none()
4463 /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
4464 /// an Err if no progress is being made and the channel should be force-closed instead.
4465 /// Should be called on a one-minute timer.
4466 pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
4467 if self.closing_negotiation_ready() {
// The first tick after negotiation became ready sets the in-flight flag; if the flag is
// still set on a later tick, negotiation has stalled and we ask the caller to force-close.
4468 if self.closing_signed_in_flight {
4469 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
4471 self.closing_signed_in_flight = true;
// Proposes the initial closing_signed (funder side) once negotiation is ready, or — on the
// non-funder side — replays a counterparty closing_signed that was queued while a monitor
// update was in flight.
4477 pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
4478 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4479 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError>
4480 where F::Target: FeeEstimator, L::Target: Logger
// No-op once we've already sent a closing_signed, or if negotiation isn't ready yet.
4482 if self.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
4483 return Ok((None, None));
// Non-funders don't open negotiation; at most they respond to a queued counterparty message.
4486 if !self.is_outbound() {
4487 if let Some(msg) = &self.pending_counterparty_closing_signed.take() {
4488 return self.closing_signed(fee_estimator, &msg);
4490 return Ok((None, None));
4493 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4495 assert!(self.shutdown_scriptpubkey.is_some());
// Open with our minimum acceptable fee; the fee_range below tells the peer how far we'll go.
4496 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
4497 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
4498 our_min_fee, our_max_fee, total_fee_satoshis);
4500 let sig = self.holder_signer
4501 .sign_closing_transaction(&closing_tx, &self.secp_ctx)
4502 .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
// Remember what we proposed so an echo from the peer finalizes the close in closing_signed.
4504 self.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
4505 Ok((Some(msgs::ClosingSigned {
4506 channel_id: self.channel_id,
4507 fee_satoshis: total_fee_satoshis,
4509 fee_range: Some(msgs::ClosingSignedFeeRange {
4510 min_fee_satoshis: our_min_fee,
4511 max_fee_satoshis: our_max_fee,
4516 // Marks a channel as waiting for a response from the counterparty. If it's not received
4517 // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
4519 fn mark_awaiting_response(&mut self) {
// Some(0) starts the tick counter; should_disconnect_peer_awaiting_response increments it.
4520 self.sent_message_awaiting_response = Some(0);
4523 /// Determines whether we should disconnect the counterparty due to not receiving a response
4524 /// within our expected timeframe.
4526 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
4527 pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
4528 let ticks_elapsed = if let Some(ticks_elapsed) = self.sent_message_awaiting_response.as_mut() {
4531 // Don't disconnect when we're not waiting on a response.
// One tick per call; report "disconnect" once the counter reaches the configured threshold.
4534 *ticks_elapsed += 1;
4535 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
// Handles an inbound `shutdown` message: validates the peer's scriptpubkey, records it,
// possibly sends our own `shutdown` (choosing a script via `signer_provider` if we don't have
// one yet), and drops any holding-cell HTLC adds so their payments can be failed upstream.
4538 pub fn shutdown<SP: Deref>(
4539 &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
4540 ) -> Result<(Option<msgs::Shutdown>, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
4541 where SP::Target: SignerProvider
4543 if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4544 return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
4546 if self.channel_state < ChannelState::FundingSent as u32 {
4547 // Spec says we should fail the connection, not the channel, but that's nonsense, there
4548 // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
4549 // can do that via error message without getting a connection fail anyway...
4550 return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
// A shutdown is invalid while the peer still has un-committed (RemoteAnnounced) HTLCs.
4552 for htlc in self.pending_inbound_htlcs.iter() {
4553 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
4554 return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
4557 assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0);
4559 if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
4560 return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_bytes().to_hex())));
// A previously-seen counterparty shutdown script must never change across retransmits.
4563 if self.counterparty_shutdown_scriptpubkey.is_some() {
4564 if Some(&msg.scriptpubkey) != self.counterparty_shutdown_scriptpubkey.as_ref() {
4565 return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_bytes().to_hex())));
4568 self.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
4571 // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
4572 // immediately after the commitment dance, but we can send a Shutdown because we won't send
4573 // any further commitment updates after we set LocalShutdownSent.
4574 let send_shutdown = (self.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32;
4576 let update_shutdown_script = match self.shutdown_scriptpubkey {
4579 assert!(send_shutdown);
4580 let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
4581 Ok(scriptpubkey) => scriptpubkey,
4582 Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
4584 if !shutdown_scriptpubkey.is_compatible(their_features) {
4585 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
4587 self.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
4592 // From here on out, we may not fail!
4594 self.channel_state |= ChannelState::RemoteShutdownSent as u32;
4595 self.update_time_counter += 1;
// If we just picked a shutdown script, persist it to the ChannelMonitor so it survives
// restarts; the update goes through the blockable-update queue like any other.
4597 let monitor_update = if update_shutdown_script {
4598 self.latest_monitor_update_id += 1;
4599 let monitor_update = ChannelMonitorUpdate {
4600 update_id: self.latest_monitor_update_id,
4601 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
4602 scriptpubkey: self.get_closing_scriptpubkey(),
4605 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
4606 if self.push_blockable_mon_update(monitor_update) {
4607 self.pending_monitor_updates.last().map(|upd| &upd.update)
4610 let shutdown = if send_shutdown {
4611 Some(msgs::Shutdown {
4612 channel_id: self.channel_id,
4613 scriptpubkey: self.get_closing_scriptpubkey(),
4617 // We can't send our shutdown until we've committed all of our pending HTLCs, but the
4618 // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
4619 // cell HTLCs and return them to fail the payment.
4620 self.holding_cell_update_fee = None;
4621 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
4622 self.holding_cell_htlc_updates.retain(|htlc_update| {
4624 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
4625 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
4632 self.channel_state |= ChannelState::LocalShutdownSent as u32;
4633 self.update_time_counter += 1;
4635 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
// Assembles the fully-signed cooperative closing transaction by filling in the 2-of-2 funding
// input witness: dummy element, both DER signatures (with SIGHASH_ALL appended), then the
// funding redeemscript.
4638 fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
4639 let mut tx = closing_tx.trust().built_transaction().clone();
4641 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
4643 let funding_key = self.get_holder_pubkeys().funding_pubkey.serialize();
4644 let counterparty_funding_key = self.counterparty_funding_pubkey().serialize();
4645 let mut holder_sig = sig.serialize_der().to_vec();
4646 holder_sig.push(EcdsaSighashType::All as u8);
4647 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
4648 cp_sig.push(EcdsaSighashType::All as u8);
// Signatures are ordered by comparing the serialized funding pubkeys — presumably mirroring
// the pubkey order inside get_funding_redeemscript(); confirm against make_funding_redeemscript.
4649 if funding_key[..] < counterparty_funding_key[..] {
4650 tx.input[0].witness.push(holder_sig);
4651 tx.input[0].witness.push(cp_sig);
4653 tx.input[0].witness.push(cp_sig);
4654 tx.input[0].witness.push(holder_sig);
4657 tx.input[0].witness.push(self.get_funding_redeemscript().into_bytes());
// Handles the counterparty's closing_signed: verifies their signature, then either accepts
// their fee (finalizing and returning the broadcastable closing transaction) or counter-
// proposes a fee within our calculated limits.
4661 pub fn closing_signed<F: Deref>(
4662 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
4663 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError>
4664 where F::Target: FeeEstimator
4666 if self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
4667 return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
4669 if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4670 return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
4672 if !self.pending_inbound_htlcs.is_empty() || !self.pending_outbound_htlcs.is_empty() {
4673 return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
4675 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
4676 return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
// If we're the funder, we must have opened negotiation first (see maybe_propose_closing_signed).
4679 if self.is_outbound() && self.last_sent_closing_fee.is_none() {
4680 return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
// Defer handling while a monitor update is in flight; maybe_propose_closing_signed replays
// the queued message once the update completes.
4683 if self.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 {
4684 self.pending_counterparty_closing_signed = Some(msg.clone());
4685 return Ok((None, None));
4688 let funding_redeemscript = self.get_funding_redeemscript();
4689 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
4690 if used_total_fee != msg.fee_satoshis {
4691 return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
4693 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.channel_value_satoshis);
4695 match self.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.get_counterparty_pubkeys().funding_pubkey) {
4698 // The remote end may have decided to revoke their output due to inconsistent dust
4699 // limits, so check for that case by re-checking the signature here.
4700 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
4701 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.channel_value_satoshis);
4702 secp_check!(self.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
// Reject non-segwit outputs below the standard dust threshold; such a tx may not relay.
4706 for outp in closing_tx.trust().built_transaction().output.iter() {
4707 if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
4708 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
4712 assert!(self.shutdown_scriptpubkey.is_some());
// If they simply echoed back the fee we last proposed, negotiation is complete.
4713 if let Some((last_fee, sig)) = self.last_sent_closing_fee {
4714 if last_fee == msg.fee_satoshis {
4715 let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
4716 self.channel_state = ChannelState::ShutdownComplete as u32;
4717 self.update_time_counter += 1;
4718 return Ok((None, Some(tx)));
4722 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
// Signs and returns a (counter-)proposal at $new_fee; when $new_fee matches the peer's fee,
// also finalizes the channel and includes the broadcastable closing transaction.
4724 macro_rules! propose_fee {
4725 ($new_fee: expr) => {
4726 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
4727 (closing_tx, $new_fee)
4729 self.build_closing_transaction($new_fee, false)
4732 let sig = self.holder_signer
4733 .sign_closing_transaction(&closing_tx, &self.secp_ctx)
4734 .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
4736 let signed_tx = if $new_fee == msg.fee_satoshis {
4737 self.channel_state = ChannelState::ShutdownComplete as u32;
4738 self.update_time_counter += 1;
4739 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
4743 self.last_sent_closing_fee = Some((used_fee, sig.clone()));
4744 return Ok((Some(msgs::ClosingSigned {
4745 channel_id: self.channel_id,
4746 fee_satoshis: used_fee,
4748 fee_range: Some(msgs::ClosingSignedFeeRange {
4749 min_fee_satoshis: our_min_fee,
4750 max_fee_satoshis: our_max_fee,
// Modern negotiation: the peer included an explicit fee_range, so check for overlap with
// ours and settle within it.
4756 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
4757 if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
4758 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
4760 if max_fee_satoshis < our_min_fee {
4761 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
4763 if min_fee_satoshis > our_max_fee {
4764 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
4767 if !self.is_outbound() {
4768 // They have to pay, so pick the highest fee in the overlapping range.
4769 // We should never set an upper bound aside from their full balance
4770 debug_assert_eq!(our_max_fee, self.channel_value_satoshis - (self.value_to_self_msat + 999) / 1000);
4771 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
4773 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
4774 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
4775 msg.fee_satoshis, our_min_fee, our_max_fee)));
4777 // The proposed fee is in our acceptable range, accept it and broadcast!
4778 propose_fee!(msg.fee_satoshis);
4781 // Old fee style negotiation. We don't bother to enforce whether they are complying
4782 // with the "making progress" requirements, we just comply and hope for the best.
4783 if let Some((last_fee, _)) = self.last_sent_closing_fee {
4784 if msg.fee_satoshis > last_fee {
4785 if msg.fee_satoshis < our_max_fee {
4786 propose_fee!(msg.fee_satoshis);
4787 } else if last_fee < our_max_fee {
4788 propose_fee!(our_max_fee);
4790 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
4793 if msg.fee_satoshis > our_min_fee {
4794 propose_fee!(msg.fee_satoshis);
4795 } else if last_fee > our_min_fee {
4796 propose_fee!(our_min_fee);
4798 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
// No last_sent_closing_fee: clamp their proposal into our [min, max] and respond.
4802 if msg.fee_satoshis < our_min_fee {
4803 propose_fee!(our_min_fee);
4804 } else if msg.fee_satoshis > our_max_fee {
4805 propose_fee!(our_max_fee);
4807 propose_fee!(msg.fee_satoshis);
4813 // Public utilities:
// Simple accessors over per-channel identifiers and aliases.
4815 pub fn channel_id(&self) -> [u8; 32] {
4819 // Return the `temporary_channel_id` used during channel establishment.
4821 // Will return `None` for channels created prior to LDK version 0.0.115.
4822 pub fn temporary_channel_id(&self) -> Option<[u8; 32]> {
4823 self.temporary_channel_id
4826 pub fn minimum_depth(&self) -> Option<u32> {
4830 /// Gets the "user_id" value passed into the construction of this channel. It has no special
4831 /// meaning and exists only to allow users to have a persistent identifier of a channel.
4832 pub fn get_user_id(&self) -> u128 {
4836 /// Gets the channel's type
4837 pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
4841 /// Guaranteed to be Some after both ChannelReady messages have been exchanged (and, thus,
4842 /// is_usable() returns true).
4843 /// Allowed in any state (including after shutdown)
4844 pub fn get_short_channel_id(&self) -> Option<u64> {
4845 self.short_channel_id
4848 /// Allowed in any state (including after shutdown)
4849 pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
4850 self.latest_inbound_scid_alias
4853 /// Allowed in any state (including after shutdown)
4854 pub fn outbound_scid_alias(&self) -> u64 {
4855 self.outbound_scid_alias
4857 /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
4858 /// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases.
4859 pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
// Asserts the "only when previously unset" contract before overwriting.
4860 assert_eq!(self.outbound_scid_alias, 0);
4861 self.outbound_scid_alias = outbound_scid_alias;
4864 /// Returns the funding_txo we either got from our peer, or were given by
4865 /// get_outbound_funding_created.
4866 pub fn get_funding_txo(&self) -> Option<OutPoint> {
4867 self.channel_transaction_parameters.funding_outpoint
4870 /// Returns the block hash in which our funding transaction was confirmed.
4871 pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
4872 self.funding_tx_confirmed_in
4875 /// Returns the current number of confirmations on the funding transaction.
4876 pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
4877 if self.funding_tx_confirmation_height == 0 {
4878 // We either haven't seen any confirmation yet, or observed a reorg.
// The block at the confirmation height itself counts as one confirmation, hence the +1;
// checked_sub yields 0 if `height` is somehow below the confirmation height.
4882 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
// Holder-selected contest delay from the channel transaction parameters.
4885 fn get_holder_selected_contest_delay(&self) -> u16 {
4886 self.channel_transaction_parameters.holder_selected_contest_delay
4889 fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
4890 &self.channel_transaction_parameters.holder_pubkeys
4893 pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
4894 self.channel_transaction_parameters.counterparty_parameters
4895 .as_ref().map(|params| params.selected_contest_delay)
// Panics if counterparty parameters are not yet set — NOTE(review): confirm call sites only
// use this after negotiation has populated them.
4898 fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
4899 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
4902 /// Allowed in any state (including after shutdown)
4903 pub fn get_counterparty_node_id(&self) -> PublicKey {
4904 self.counterparty_node_id
4907 /// Allowed in any state (including after shutdown)
4908 pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
4909 self.holder_htlc_minimum_msat
4912 /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
4913 pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
4914 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
4917 /// Allowed in any state (including after shutdown)
4918 pub fn get_announced_htlc_max_msat(&self) -> u64 {
4920 // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
4921 // to use full capacity. This is an effort to reduce routing failures, because in many cases
4922 // channel might have been used to route very small values (either by honest users or as DoS).
4923 self.channel_value_satoshis * 1000 * 9 / 10,
4925 self.counterparty_max_htlc_value_in_flight_msat
4929 /// Allowed in any state (including after shutdown)
4930 pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
4931 self.counterparty_htlc_minimum_msat
4934 /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
4935 pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
4936 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
// Returns None until the counterparty's channel reserve is known; otherwise combines the
// spendable capacity (channel value minus both reserves, in msat) with the party's own limit.
4939 fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
4940 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
4941 let holder_reserve = self.holder_selected_channel_reserve_satoshis;
4943 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
4944 party_max_htlc_value_in_flight_msat
4949 pub fn get_value_satoshis(&self) -> u64 {
4950 self.channel_value_satoshis
4953 pub fn get_fee_proportional_millionths(&self) -> u32 {
4954 self.config.options.forwarding_fee_proportional_millionths
// Clamps up to MIN_CLTV_EXPIRY_DELTA even if the configured delta is lower.
4957 pub fn get_cltv_expiry_delta(&self) -> u16 {
4958 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
4961 pub fn get_max_dust_htlc_exposure_msat(&self) -> u64 {
4962 self.config.options.max_dust_htlc_exposure_msat
4965 /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
4966 pub fn prev_config(&self) -> Option<ChannelConfig> {
// Drops the tick counter stored alongside the config (see maybe_expire_prev_config).
4967 self.prev_config.map(|prev_config| prev_config.0)
4970 // Checks whether we should emit a `ChannelPending` event.
4971 pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
4972 self.is_funding_initiated() && !self.channel_pending_event_emitted
4975 // Returns whether we already emitted a `ChannelPending` event.
4976 pub(crate) fn channel_pending_event_emitted(&self) -> bool {
4977 self.channel_pending_event_emitted
4980 // Remembers that we already emitted a `ChannelPending` event.
4981 pub(crate) fn set_channel_pending_event_emitted(&mut self) {
4982 self.channel_pending_event_emitted = true;
4985 // Checks whether we should emit a `ChannelReady` event.
// Mirrors the `ChannelPending` pair above, keyed on usability instead of funding initiation.
4986 pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
4987 self.is_usable() && !self.channel_ready_event_emitted
4990 // Remembers that we already emitted a `ChannelReady` event.
4991 pub(crate) fn set_channel_ready_event_emitted(&mut self) {
4992 self.channel_ready_event_emitted = true;
4995 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
4996 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
4997 /// no longer be considered when forwarding HTLCs.
4998 pub fn maybe_expire_prev_config(&mut self) {
4999 if self.prev_config.is_none() {
// prev_config is (ChannelConfig, ticks-since-update); clear it once enough ticks elapse.
5002 let prev_config = self.prev_config.as_mut().unwrap();
5004 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
5005 self.prev_config = None;
5009 /// Returns the current [`ChannelConfig`] applied to the channel.
5010 pub fn config(&self) -> ChannelConfig {
5014 /// Updates the channel's config. A bool is returned indicating whether the config update
5015 /// applied resulted in a new ChannelUpdate message.
5016 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
// Only changes to the forwarding-relevant fields warrant a new ChannelUpdate.
5017 let did_channel_update =
5018 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
5019 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
5020 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
5021 if did_channel_update {
// Keep the old config around (with a fresh tick counter) so HTLCs routed under the old
// fee policy can still be accepted; see htlc_satisfies_config / maybe_expire_prev_config.
5022 self.prev_config = Some((self.config.options, 0));
5023 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
5024 // policy change to propagate throughout the network.
5025 self.update_time_counter += 1;
5027 self.config.options = *config;
// Validates a forwarded HTLC's fee and CLTV delta against one specific config snapshot; on
// failure returns the failure-reason text and its BOLT 4 error code.
5031 fn internal_htlc_satisfies_config(
5032 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
5033 ) -> Result<(), (&'static str, u16)> {
// checked_mul/checked_add guard against overflow; an overflowed fee counts as unmet.
5034 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
5035 .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
5036 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
5037 (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
5039 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
5040 0x1000 | 12, // fee_insufficient
5043 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
5045 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
5046 0x1000 | 13, // incorrect_cltv_expiry
5052 /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
5053 /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
5054 /// unsuccessful, falls back to the previous one if one exists.
5055 pub fn htlc_satisfies_config(
5056 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
5057 ) -> Result<(), (&'static str, u16)> {
5058 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.config())
5060 if let Some(prev_config) = self.prev_config() {
5061 self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
5068 pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
5072 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
5073 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
5074 // may, at any point, increase by at least 10 sat/vB (i.e 2530 sat/kWU) or 25%,
5075 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
5076 // more dust balance if the feerate increases when we have several HTLCs pending
5077 // which are near the dust limit.
5078 let mut feerate_per_kw = self.feerate_per_kw;
5079 // If there's a pending update fee, use it to ensure we aren't under-estimating
5080 // potential feerate updates coming soon.
5081 if let Some((feerate, _)) = self.pending_update_fee {
5082 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
// A caller-supplied prospective outbound feerate update also raises the baseline.
5084 if let Some(feerate) = outbound_feerate_update {
5085 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
// max(2530 sat/kWU floor, +25%): the "10 sat/vB or 25%" buffer described above.
5087 cmp::max(2530, feerate_per_kw * 1250 / 1000)
// Commitment-number accessors. NOTE(review): the +1/+2 offsets suggest the stored counters
// track the next (not current) commitment — confirm against the fields' definitions.
5090 pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
5091 self.cur_holder_commitment_transaction_number + 1
5094 pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
// While we await their revoke_and_ack the counterparty is one commitment further along,
// hence the conditional extra -1.
5095 self.cur_counterparty_commitment_transaction_number + 1 - if self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 }
5098 pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
5099 self.cur_counterparty_commitment_transaction_number + 2
5103 pub fn get_signer(&self) -> &Signer {
// Builds a snapshot of the channel's balance/HTLC aggregates, mostly in msat.
5108 pub fn get_value_stat(&self) -> ChannelValueStat {
5110 value_to_self_msat: self.value_to_self_msat,
5111 channel_value_msat: self.channel_value_satoshis * 1000,
// Panics if the counterparty reserve is unset — NOTE(review): confirm callers only use
// this once the reserve has been negotiated.
5112 channel_reserve_msat: self.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
5113 pending_outbound_htlcs_amount_msat: self.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
5114 pending_inbound_htlcs_amount_msat: self.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
// Sums only the AddHTLC entries still sitting in the holding cell.
5115 holding_cell_outbound_amount_msat: {
5117 for h in self.holding_cell_htlc_updates.iter() {
5119 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
5127 counterparty_max_htlc_value_in_flight_msat: self.counterparty_max_htlc_value_in_flight_msat,
5128 counterparty_dust_limit_msat: self.counterparty_dust_limit_satoshis * 1000,
5132 /// Allowed in any state (including after shutdown)
5133 pub fn get_update_time_counter(&self) -> u32 {
5134 self.update_time_counter
5137 pub fn get_latest_monitor_update_id(&self) -> u64 {
5138 self.latest_monitor_update_id
5141 pub fn should_announce(&self) -> bool {
5142 self.config.announced_channel
// Whether the channel is outbound from us, per the channel transaction parameters.
5145 pub fn is_outbound(&self) -> bool {
5146 self.channel_transaction_parameters.is_outbound_from_holder
5149 /// Gets the fee we'd want to charge for adding an HTLC output to this Channel
5150 /// Allowed in any state (including after shutdown)
5151 pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
5152 self.config.options.forwarding_fee_base_msat
5155 /// Returns true if we've ever received a message from the remote end for this Channel
5156 pub fn have_received_message(&self) -> bool {
5157 self.channel_state > (ChannelState::OurInitSent as u32)
5160 /// Returns true if this channel is fully established and not known to be closing.
5161 /// Allowed in any state (including after shutdown)
5162 pub fn is_usable(&self) -> bool {
// Requires the ChannelReady bit set, neither shutdown-sent bit set, and no channel_ready
// message still waiting on a pending monitor update.
5163 let mask = ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK;
5164 (self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready
5167 /// Returns true if this channel is currently available for use. This is a superset of
5168 /// is_usable() and considers things like the channel being temporarily disabled.
5169 /// Allowed in any state (including after shutdown)
5170 pub fn is_live(&self) -> bool {
5171 self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0)
5174 /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
5175 /// Allowed in any state (including after shutdown)
5176 pub fn is_awaiting_monitor_update(&self) -> bool {
5177 (self.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
5180 pub fn get_latest_complete_monitor_update_id(&self) -> u64 {
5181 if self.pending_monitor_updates.is_empty() { return self.get_latest_monitor_update_id(); }
5182 self.pending_monitor_updates[0].update.update_id - 1
5185 /// Returns the next blocked monitor update, if one exists, and a bool which indicates a
5186 /// further blocked monitor update exists after the next.
5187 pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(&ChannelMonitorUpdate, bool)> {
// Scan in queue order so updates are released to the user strictly in-order.
5188 for i in 0..self.pending_monitor_updates.len() {
5189 if self.pending_monitor_updates[i].blocked {
// Mark it released before handing it out. NOTE(review): the bool checks for *any*
// later queue entry; presumably entries after a blocked one are themselves blocked,
// matching the doc above — confirm that invariant holds at the push sites.
5190 self.pending_monitor_updates[i].blocked = false;
5191 return Some((&self.pending_monitor_updates[i].update,
5192 self.pending_monitor_updates.len() > i + 1));
5198 /// Pushes a new monitor update into our monitor update queue, returning whether it should be
5199 /// immediately given to the user for persisting or if it should be held as blocked.
5200 fn push_blockable_mon_update(&mut self, update: ChannelMonitorUpdate) -> bool {
// Release immediately only if nothing already queued is blocked; otherwise this update
// must queue behind the blocked ones to preserve in-order persistence.
5201 let release_monitor = self.pending_monitor_updates.iter().all(|upd| !upd.blocked);
5202 self.pending_monitor_updates.push(PendingChannelMonitorUpdate {
5203 update, blocked: !release_monitor
5208 /// Pushes a new monitor update into our monitor update queue, returning a reference to it if
5209 /// it should be immediately given to the user for persisting or `None` if it should be held as
/// blocked.
5211 fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
5212 -> Option<&ChannelMonitorUpdate> {
5213 let release_monitor = self.push_blockable_mon_update(update);
// The update we just pushed is necessarily the last entry in the queue.
5214 if release_monitor { self.pending_monitor_updates.last().map(|upd| &upd.update) } else { None }
/// Returns true if no monitor updates are pending completion for this channel.
5217 pub fn no_monitor_updates_pending(&self) -> bool {
5218 self.pending_monitor_updates.is_empty()
/// Removes all monitor updates with ids up to and including `update_id` from the pending
/// queue, asserting that each removed update had already been released to the user.
5221 pub fn complete_all_mon_updates_through(&mut self, update_id: u64) {
5222 self.pending_monitor_updates.retain(|upd| {
5223 if upd.update.update_id <= update_id {
// A completed update must have been handed to the user first, never still blocked.
5224 assert!(!upd.blocked, "Completed update must have flown");
/// Removes the single pending monitor update with the given id, if it is present.
5230 pub fn complete_one_mon_update(&mut self, update_id: u64) {
5231 self.pending_monitor_updates.retain(|upd| upd.update.update_id != update_id);
5234 /// Returns an iterator over all unblocked monitor updates which have not yet completed.
5235 pub fn uncompleted_unblocked_mon_updates(&self) -> impl Iterator<Item=&ChannelMonitorUpdate> {
// Blocked updates have not yet been released to the user, so they are skipped here.
5236 self.pending_monitor_updates.iter()
5237 .filter_map(|upd| if upd.blocked { None } else { Some(&upd.update) })
5240 /// Returns true if funding_created was sent/received.
5241 pub fn is_funding_initiated(&self) -> bool {
// Every state at or beyond FundingSent implies the funding flow has begun.
5242 self.channel_state >= ChannelState::FundingSent as u32
5245 /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
5246 /// If the channel is outbound, this implies we have not yet broadcasted the funding
5247 /// transaction. If the channel is inbound, this implies simply that the channel has not
/// yet fully moved past initial funding (NOTE(review): original sentence truncated here —
/// confirm exact wording upstream).
5249 pub fn is_awaiting_initial_mon_persist(&self) -> bool {
5250 if !self.is_awaiting_monitor_update() { return false; }
// Ignore flags the peer can toggle (TheirChannelReady/PeerDisconnected) and our own
// MonitorUpdateInProgress bit; what's left must be exactly FundingSent.
5251 if self.channel_state &
5252 !(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)
5253 == ChannelState::FundingSent as u32 {
5254 // If we're not a 0conf channel, we'll be waiting on a monitor update with only
5255 // FundingSent set, though our peer could have sent their channel_ready.
5256 debug_assert!(self.minimum_depth.unwrap_or(1) > 0);
5259 if self.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
5260 self.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
5261 // If we're a 0-conf channel, we'll move beyond FundingSent immediately even while
5262 // waiting for the initial monitor persistence. Thus, we check if our commitment
5263 // transaction numbers have both been iterated only exactly once (for the
5264 // funding_signed), and we're awaiting monitor update.
5266 // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
5267 // only way to get an awaiting-monitor-update state during initial funding is if the
5268 // initial monitor persistence is still pending).
5270 // Because deciding we're awaiting initial broadcast spuriously could result in
5271 // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
5272 // we hard-assert here, even in production builds.
5273 if self.is_outbound() { assert!(self.funding_transaction.is_some()); }
5274 assert!(self.monitor_pending_channel_ready);
5275 assert_eq!(self.latest_monitor_update_id, 0);
5281 /// Returns true if our channel_ready has been sent
5282 pub fn is_our_channel_ready(&self) -> bool {
// Either the OurChannelReady flag is set, or we've advanced to (or past) ChannelReady.
5283 (self.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.channel_state >= ChannelState::ChannelReady as u32
5286 /// Returns true if our peer has either initiated or agreed to shut down the channel.
5287 pub fn received_shutdown(&self) -> bool {
// Set when we process the counterparty's shutdown message.
5288 (self.channel_state & ChannelState::RemoteShutdownSent as u32) != 0
5291 /// Returns true if we either initiated or agreed to shut down the channel.
5292 pub fn sent_shutdown(&self) -> bool {
// Set once we have sent our own shutdown message.
5293 (self.channel_state & ChannelState::LocalShutdownSent as u32) != 0
5296 /// Returns true if this channel is fully shut down. True here implies that no further actions
5297 /// may/will be taken on this channel, and thus this object should be freed. Any future changes
5298 /// will be handled appropriately by the chain monitor.
5299 pub fn is_shutdown(&self) -> bool {
5300 if (self.channel_state & ChannelState::ShutdownComplete as u32) == ChannelState::ShutdownComplete as u32 {
// ShutdownComplete is terminal: no other state flag may be set alongside it.
5301 assert!(self.channel_state == ChannelState::ShutdownComplete as u32);
/// Returns the current [`ChannelUpdateStatus`] of this channel.
5306 pub fn channel_update_status(&self) -> ChannelUpdateStatus {
5307 self.channel_update_status
/// Sets the channel_update status, bumping `update_time_counter` as a side effect
/// (presumably so any subsequently-generated channel_update carries a newer timestamp —
/// see the counter's other uses).
5310 pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
5311 self.update_time_counter += 1;
5312 self.channel_update_status = status;
/// Checks whether the funding transaction has sufficient confirmations (or the channel is
/// 0-conf) and, if so, updates our state flags and returns the channel_ready message to
/// send, or `None` if nothing should be sent yet (including when the send must wait on a
/// pending monitor update or a disconnected peer).
5315 fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
// Called:
5317 // * always when a new block/transactions are confirmed with the new height
5318 // * when funding is signed with a height of 0
5319 if self.funding_tx_confirmation_height == 0 && self.minimum_depth != Some(0) {
5323 let funding_tx_confirmations = height as i64 - self.funding_tx_confirmation_height as i64 + 1;
5324 if funding_tx_confirmations <= 0 {
// Non-positive confirmations means the funding tx has been reorged out; forget the
// recorded confirmation height.
5325 self.funding_tx_confirmation_height = 0;
5328 if funding_tx_confirmations < self.minimum_depth.unwrap_or(0) as i64 {
5332 let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
5333 let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
// We're the first to be ready: set OurChannelReady alongside FundingSent.
5334 self.channel_state |= ChannelState::OurChannelReady as u32;
5336 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) {
// The peer already sent their channel_ready; ours completes the handshake, so move
// straight to ChannelReady (preserving the multi-state flags).
5337 self.channel_state = ChannelState::ChannelReady as u32 | (self.channel_state & MULTI_STATE_FLAGS);
5338 self.update_time_counter += 1;
5340 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
5341 // We got a reorg but not enough to trigger a force close, just ignore.
5344 if self.funding_tx_confirmation_height != 0 && self.channel_state < ChannelState::ChannelReady as u32 {
5345 // We should never see a funding transaction on-chain until we've received
5346 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
5347 // an inbound channel - before that we have no known funding TXID). The fuzzer,
5348 // however, may do this and we shouldn't treat it as a bug.
5349 #[cfg(not(fuzzing))]
5350 panic!("Started confirming a channel in a state pre-FundingSent: {}.\n\
5351 Do NOT broadcast a funding transaction manually - let LDK do it for you!",
5352 self.channel_state);
5354 // We got a reorg but not enough to trigger a force close, just ignore.
5358 if need_commitment_update {
5359 if self.channel_state & (ChannelState::MonitorUpdateInProgress as u32) == 0 {
5360 if self.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
5361 let next_per_commitment_point =
5362 self.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.secp_ctx);
5363 return Some(msgs::ChannelReady {
5364 channel_id: self.channel_id,
5365 next_per_commitment_point,
5366 short_channel_id_alias: Some(self.outbound_scid_alias),
// A monitor update is in flight: remember to send channel_ready once it completes.
5370 self.monitor_pending_channel_ready = true;
5376 /// When a transaction is confirmed, we check whether it is or spends the funding transaction
5377 /// In the first case, we store the confirmation height and calculate the short channel id.
5378 /// In the second, we simply return an Err indicating we need to be force-closed now.
5379 pub fn transactions_confirmed<NS: Deref, L: Deref>(
5380 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
5381 genesis_block_hash: BlockHash, node_signer: &NS, user_config: &UserConfig, logger: &L
5382 ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5384 NS::Target: NodeSigner,
5387 if let Some(funding_txo) = self.get_funding_txo() {
5388 for &(index_in_block, tx) in txdata.iter() {
5389 // Check if the transaction is the expected funding transaction, and if it is,
5390 // check that it pays the right amount to the right script.
5391 if self.funding_tx_confirmation_height == 0 {
5392 if tx.txid() == funding_txo.txid {
5393 let txo_idx = funding_txo.index as usize;
// Validate the output actually exists and matches our redeem script and amount.
5394 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.get_funding_redeemscript().to_v0_p2wsh() ||
5395 tx.output[txo_idx].value != self.channel_value_satoshis {
5396 if self.is_outbound() {
5397 // If we generated the funding transaction and it doesn't match what it
5398 // should, the client is really broken and we should just panic and
5399 // tell them off. That said, because hash collisions happen with high
5400 // probability in fuzzing mode, if we're fuzzing we just close the
5401 // channel and move on.
5402 #[cfg(not(fuzzing))]
5403 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5405 self.update_time_counter += 1;
5406 let err_reason = "funding tx had wrong script/value or output index";
5407 return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
5409 if self.is_outbound() {
5410 for input in tx.input.iter() {
// Empty witness => non-segwit input => the funding txid is malleable.
5411 if input.witness.is_empty() {
5412 // We generated a malleable funding transaction, implying we've
5413 // just exposed ourselves to funds loss to our counterparty.
5414 #[cfg(not(fuzzing))]
5415 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
// Record confirmation details and derive the real short channel id.
5419 self.funding_tx_confirmation_height = height;
5420 self.funding_tx_confirmed_in = Some(*block_hash);
5421 self.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
5422 Ok(scid) => Some(scid),
5423 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
5427 // If we allow 1-conf funding, we may need to check for channel_ready here and
5428 // send it immediately instead of waiting for a best_block_updated call (which
5429 // may have already happened for this block).
5430 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5431 log_info!(logger, "Sending a channel_ready to our peer for channel {}", log_bytes!(self.channel_id));
5432 let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger);
5433 return Ok((Some(channel_ready), announcement_sigs));
// Any confirmed spend of the funding output closes the channel.
5436 for inp in tx.input.iter() {
5437 if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
5438 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(self.channel_id()));
5439 return Err(ClosureReason::CommitmentTxConfirmed);
5447 /// When a new block is connected, we check the height of the block against outbound holding
5448 /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
5449 /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
5450 /// handled by the ChannelMonitor.
5452 /// If we return Err, the channel may have been closed, at which point the standard
5453 /// requirements apply - no calls may be made except those explicitly stated to be allowed
5456 /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
5458 pub fn best_block_updated<NS: Deref, L: Deref>(
5459 &mut self, height: u32, highest_header_time: u32, genesis_block_hash: BlockHash,
5460 node_signer: &NS, user_config: &UserConfig, logger: &L
5461 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5463 NS::Target: NodeSigner,
// Thin wrapper around do_best_block_updated with the context needed to also produce
// announcement_signatures (the reorg path passes None for that context instead).
5466 self.do_best_block_updated(height, highest_header_time, Some((genesis_block_hash, node_signer, user_config)), logger)
/// Shared implementation for best_block_updated and funding_transaction_unconfirmed.
/// `genesis_node_signer` carries the context needed to build announcement_signatures; it is
/// `None` on the funding-unconfirmed (reorg) path, where no announcement can be produced.
5469 fn do_best_block_updated<NS: Deref, L: Deref>(
5470 &mut self, height: u32, highest_header_time: u32,
5471 genesis_node_signer: Option<(BlockHash, &NS, &UserConfig)>, logger: &L
5472 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5474 NS::Target: NodeSigner,
5477 let mut timed_out_htlcs = Vec::new();
5478 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
5479 // forward an HTLC when our counterparty should almost certainly just fail it for expiring
5481 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
// Drop holding-cell HTLC adds whose CLTV is too near; report them to the caller to fail.
5482 self.holding_cell_htlc_updates.retain(|htlc_update| {
5484 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
5485 if *cltv_expiry <= unforwarded_htlc_cltv_limit {
5486 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
// Keep the update-time counter monotonic with the best header timestamp we've seen.
5494 self.update_time_counter = cmp::max(self.update_time_counter, highest_header_time);
5496 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5497 let announcement_sigs = if let Some((genesis_block_hash, node_signer, user_config)) = genesis_node_signer {
5498 self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger)
5500 log_info!(logger, "Sending a channel_ready to our peer for channel {}", log_bytes!(self.channel_id));
5501 return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
5504 let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
5505 if non_shutdown_state >= ChannelState::ChannelReady as u32 ||
5506 (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 {
5507 let mut funding_tx_confirmations = height as i64 - self.funding_tx_confirmation_height as i64 + 1;
5508 if self.funding_tx_confirmation_height == 0 {
5509 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
5510 // zero if it has been reorged out, however in either case, our state flags
5511 // indicate we've already sent a channel_ready
5512 funding_tx_confirmations = 0;
5515 // If we've sent channel_ready (or have both sent and received channel_ready), and
5516 // the funding transaction has become unconfirmed,
5517 // close the channel and hope we can get the latest state on chain (because presumably
5518 // the funding transaction is at least still in the mempool of most nodes).
5520 // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
5521 // 0-conf channel, but not doing so may lead to the
5522 // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
5524 if funding_tx_confirmations == 0 && self.funding_tx_confirmed_in.is_some() {
5525 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
5526 self.minimum_depth.unwrap(), funding_tx_confirmations);
5527 return Err(ClosureReason::ProcessingError { err: err_reason });
5529 } else if !self.is_outbound() && self.funding_tx_confirmed_in.is_none() &&
5530 height >= self.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
// Inbound channel whose funding never confirmed within the deadline: give up.
5531 log_info!(logger, "Closing channel {} due to funding timeout", log_bytes!(self.channel_id));
5532 // If funding_tx_confirmed_in is unset, the channel must not be active
5533 assert!(non_shutdown_state <= ChannelState::ChannelReady as u32);
5534 assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
5535 return Err(ClosureReason::FundingTimedOut);
5538 let announcement_sigs = if let Some((genesis_block_hash, node_signer, user_config)) = genesis_node_signer {
5539 self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger)
5541 Ok((None, timed_out_htlcs, announcement_sigs))
5544 /// Indicates the funding transaction is no longer confirmed in the main chain. This may
5545 /// force-close the channel, but may also indicate a harmless reorganization of a block or two
5546 /// before the channel has reached channel_ready and we can just wait for more blocks.
5547 pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
5548 if self.funding_tx_confirmation_height != 0 {
5549 // We handle the funding disconnection by calling best_block_updated with a height one
5550 // below where our funding was connected, implying a reorg back to conf_height - 1.
5551 let reorg_height = self.funding_tx_confirmation_height - 1;
5552 // We use the time field to bump the current time we set on channel updates if its
5553 // larger. If we don't know that time has moved forward, we can just set it to the last
5554 // time we saw and it will be ignored.
5555 let best_time = self.update_time_counter;
// No genesis/signer context: a reorg can never produce channel_ready/announcements,
// which the asserts below enforce.
5556 match self.do_best_block_updated(reorg_height, best_time, None::<(BlockHash, &&NodeSigner, &UserConfig)>, logger) {
5557 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
5558 assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
5559 assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
5560 assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
5566 // We never learned about the funding confirmation anyway, just ignore
5571 // Methods to get unprompted messages to send to the remote end (or where we already returned
5572 // something in the handler for the message that prompted this message):
/// Generates the open_channel message to send to our peer for an outbound channel.
///
/// Panics if called on an inbound channel, after the channel has moved past the
/// OurInitSent state, or after the holder commitment number has advanced.
5574 pub fn get_open_channel(&self, chain_hash: BlockHash) -> msgs::OpenChannel {
5575 if !self.is_outbound() {
5576 panic!("Tried to open a channel for an inbound channel?");
5578 if self.channel_state != ChannelState::OurInitSent as u32 {
5579 panic!("Cannot generate an open_channel after we've moved forward");
5582 if self.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
5583 panic!("Tried to send an open_channel for a channel that has already advanced");
5586 let first_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
5587 let keys = self.get_holder_pubkeys();
5591 temporary_channel_id: self.channel_id,
5592 funding_satoshis: self.channel_value_satoshis,
// push_msat is whatever portion of the channel value we are NOT keeping for ourselves.
5593 push_msat: self.channel_value_satoshis * 1000 - self.value_to_self_msat,
5594 dust_limit_satoshis: self.holder_dust_limit_satoshis,
5595 max_htlc_value_in_flight_msat: self.holder_max_htlc_value_in_flight_msat,
5596 channel_reserve_satoshis: self.holder_selected_channel_reserve_satoshis,
5597 htlc_minimum_msat: self.holder_htlc_minimum_msat,
5598 feerate_per_kw: self.feerate_per_kw as u32,
5599 to_self_delay: self.get_holder_selected_contest_delay(),
5600 max_accepted_htlcs: self.holder_max_accepted_htlcs,
5601 funding_pubkey: keys.funding_pubkey,
5602 revocation_basepoint: keys.revocation_basepoint,
5603 payment_point: keys.payment_point,
5604 delayed_payment_basepoint: keys.delayed_payment_basepoint,
5605 htlc_basepoint: keys.htlc_basepoint,
5606 first_per_commitment_point,
// Bit 0 of channel_flags signals announce_channel.
5607 channel_flags: if self.config.announced_channel {1} else {0},
// An empty script here means "no upfront shutdown script committed".
5608 shutdown_scriptpubkey: Some(match &self.shutdown_scriptpubkey {
5609 Some(script) => script.clone().into_inner(),
5610 None => Builder::new().into_script(),
5612 channel_type: Some(self.channel_type.clone()),
/// Returns true if this inbound channel is still waiting for the user to accept it.
5616 pub fn inbound_is_awaiting_accept(&self) -> bool {
5617 self.inbound_awaiting_accept
5620 /// Sets this channel to accepting 0conf, must be done before `get_accept_channel`
5621 pub fn set_0conf(&mut self) {
// Only valid while the channel is still awaiting user acceptance.
5622 assert!(self.inbound_awaiting_accept);
// A minimum depth of 0 means no on-chain confirmations are required before use.
5623 self.minimum_depth = Some(0);
5626 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
5627 /// should be sent back to the counterparty node.
///
/// Panics if called on an outbound channel, after the channel state or commitment number
/// has advanced, or if the channel was already accepted.
5629 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
5630 pub fn accept_inbound_channel(&mut self, user_id: u128) -> msgs::AcceptChannel {
5631 if self.is_outbound() {
5632 panic!("Tried to send accept_channel for an outbound channel?");
5634 if self.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) {
5635 panic!("Tried to send accept_channel after channel had moved forward");
5637 if self.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
5638 panic!("Tried to send an accept_channel for a channel that has already advanced");
5640 if !self.inbound_awaiting_accept {
5641 panic!("The inbound channel has already been accepted");
// Record the user's id for the channel and mark it accepted before building the message.
5644 self.user_id = user_id;
5645 self.inbound_awaiting_accept = false;
5647 self.generate_accept_channel_message()
5650 /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
5651 /// inbound channel. If the intention is to accept an inbound channel, use
5652 /// [`Channel::accept_inbound_channel`] instead.
///
/// Does not mutate any channel state; it only reads our parameters and keys.
5654 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
5655 fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
5656 let first_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
5657 let keys = self.get_holder_pubkeys();
5659 msgs::AcceptChannel {
5660 temporary_channel_id: self.channel_id,
5661 dust_limit_satoshis: self.holder_dust_limit_satoshis,
5662 max_htlc_value_in_flight_msat: self.holder_max_htlc_value_in_flight_msat,
5663 channel_reserve_satoshis: self.holder_selected_channel_reserve_satoshis,
5664 htlc_minimum_msat: self.holder_htlc_minimum_msat,
// minimum_depth must have been set by this point (e.g. via set_0conf or channel init).
5665 minimum_depth: self.minimum_depth.unwrap(),
5666 to_self_delay: self.get_holder_selected_contest_delay(),
5667 max_accepted_htlcs: self.holder_max_accepted_htlcs,
5668 funding_pubkey: keys.funding_pubkey,
5669 revocation_basepoint: keys.revocation_basepoint,
5670 payment_point: keys.payment_point,
5671 delayed_payment_basepoint: keys.delayed_payment_basepoint,
5672 htlc_basepoint: keys.htlc_basepoint,
5673 first_per_commitment_point,
// An empty script means "no upfront shutdown script committed".
5674 shutdown_scriptpubkey: Some(match &self.shutdown_scriptpubkey {
5675 Some(script) => script.clone().into_inner(),
5676 None => Builder::new().into_script(),
5678 channel_type: Some(self.channel_type.clone()),
5680 next_local_nonce: None,
5684 /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
5685 /// inbound channel without accepting it.
5687 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
5689 pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
// Pure delegation — unlike accept_inbound_channel, this mutates no state.
5690 self.generate_accept_channel_message()
5693 /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created)
/// Builds and signs the counterparty's initial commitment transaction, returning only the
/// signature (the `.0` of the signer's result).
5694 fn get_outbound_funding_created_signature<L: Deref>(&mut self, logger: &L) -> Result<Signature, ChannelError> where L::Target: Logger {
5695 let counterparty_keys = self.build_remote_transaction_keys();
5696 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
// No HTLCs can exist at funding time, hence the empty Vec of HTLC preimages.
5697 Ok(self.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx)
5698 .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0)
5701 /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
5702 /// a funding_created message for the remote peer.
5703 /// Panics if called at some time other than immediately after initial handshake, if called twice,
5704 /// or if called on an inbound channel.
5705 /// Note that channel_id changes during this call!
5706 /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
5707 /// If an Err is returned, it is a ChannelError::Close.
5708 pub fn get_outbound_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, logger: &L) -> Result<msgs::FundingCreated, ChannelError> where L::Target: Logger {
5709 if !self.is_outbound() {
5710 panic!("Tried to create outbound funding_created message on an inbound channel!");
// NOTE(review): "messsage" typo below is in a runtime panic string; left untouched here.
5712 if self.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
5713 panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
5715 if self.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
5716 self.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
5717 self.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
5718 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
// The signer needs the funding outpoint before it can sign the commitment tx.
5721 self.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
5722 self.holder_signer.provide_channel_parameters(&self.channel_transaction_parameters);
5724 let signature = match self.get_outbound_funding_created_signature(logger) {
5727 log_error!(logger, "Got bad signatures: {:?}!", e);
// Roll back the funding outpoint on failure so a later retry starts clean.
5728 self.channel_transaction_parameters.funding_outpoint = None;
5733 let temporary_channel_id = self.channel_id;
5735 // Now that we're past error-generating stuff, update our local state:
5737 self.channel_state = ChannelState::FundingCreated as u32;
// The real channel_id is derived from the funding outpoint from this point on.
5738 self.channel_id = funding_txo.to_channel_id();
5739 self.funding_transaction = Some(funding_transaction);
5741 Ok(msgs::FundingCreated {
5742 temporary_channel_id,
5743 funding_txid: funding_txo.txid,
5744 funding_output_index: funding_txo.index,
5747 partial_signature_with_nonce: None,
5749 next_local_nonce: None,
5753 /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
5754 /// announceable and available for use (have exchanged ChannelReady messages in both
5755 /// directions). Should be used for both broadcasted announcements and in response to an
5756 /// AnnouncementSignatures message from the remote peer.
5758 /// Will only fail if we're not in a state where channel_announcement may be sent (including
5761 /// This will only return ChannelError::Ignore upon failure.
5762 fn get_channel_announcement<NS: Deref>(
5763 &self, node_signer: &NS, chain_hash: BlockHash, user_config: &UserConfig,
5764 ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5765 if !self.config.announced_channel {
5766 return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
5768 if !self.is_usable() {
5769 return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
5772 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5773 .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
5774 let counterparty_node_id = NodeId::from_pubkey(&self.get_counterparty_node_id());
// BOLT 7 orders node_id_1/node_id_2 lexicographically by serialized pubkey; this
// comparison decides which side we are.
5775 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
5777 let msg = msgs::UnsignedChannelAnnouncement {
5778 features: channelmanager::provided_channel_features(&user_config),
// Safe to unwrap: is_usable() above implies the channel confirmed and has a real scid.
5780 short_channel_id: self.get_short_channel_id().unwrap(),
5781 node_id_1: if were_node_one { node_id } else { counterparty_node_id },
5782 node_id_2: if were_node_one { counterparty_node_id } else { node_id },
5783 bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.get_holder_pubkeys().funding_pubkey } else { self.counterparty_funding_pubkey() }),
5784 bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.counterparty_funding_pubkey() } else { &self.get_holder_pubkeys().funding_pubkey }),
5785 excess_data: Vec::new(),
/// Builds the announcement_signatures message to send our peer, or `None` if we cannot
/// (insufficient confirmations, channel unusable, peer disconnected, already sent, or a
/// signer failure). On success, transitions announcement_sigs_state to MessageSent.
5791 fn get_announcement_sigs<NS: Deref, L: Deref>(
5792 &mut self, node_signer: &NS, genesis_block_hash: BlockHash, user_config: &UserConfig,
5793 best_block_height: u32, logger: &L
5794 ) -> Option<msgs::AnnouncementSignatures>
5796 NS::Target: NodeSigner,
// conf_height + 5 > best_height means fewer than 6 confirmations — too early to announce
// (matches the "required six confirmations" error in announcement_signatures()).
5799 if self.funding_tx_confirmation_height == 0 || self.funding_tx_confirmation_height + 5 > best_block_height {
5803 if !self.is_usable() {
5807 if self.channel_state & ChannelState::PeerDisconnected as u32 != 0 {
5808 log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
// Only send announcement_signatures once.
5812 if self.announcement_sigs_state != AnnouncementSigsState::NotSent {
5816 log_trace!(logger, "Creating an announcement_signatures message for channel {}", log_bytes!(self.channel_id()));
5817 let announcement = match self.get_channel_announcement(node_signer, genesis_block_hash, user_config) {
5820 log_trace!(logger, "{:?}", e);
5824 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
5826 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
5831 let our_bitcoin_sig = match self.holder_signer.sign_channel_announcement_with_funding_key(&announcement, &self.secp_ctx) {
5833 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
5838 self.announcement_sigs_state = AnnouncementSigsState::MessageSent;
5840 Some(msgs::AnnouncementSignatures {
5841 channel_id: self.channel_id(),
5842 short_channel_id: self.get_short_channel_id().unwrap(),
5843 node_signature: our_node_sig,
5844 bitcoin_signature: our_bitcoin_sig,
5848 /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
/// available, i.e. we have not yet received announcement_signatures from our counterparty
/// (see the error message in the else branch below).
5850 fn sign_channel_announcement<NS: Deref>(
5851 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
5852 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5853 if let Some((their_node_sig, their_bitcoin_sig)) = self.announcement_sigs {
5854 let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5855 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
// Determine our slot from the already-ordered announcement rather than re-comparing keys.
5856 let were_node_one = announcement.node_id_1 == our_node_key;
5858 let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
5859 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
5860 let our_bitcoin_sig = self.holder_signer.sign_channel_announcement_with_funding_key(&announcement, &self.secp_ctx)
5861 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
// Place each side's signatures in the slot matching node_id_1/node_id_2 ordering.
5862 Ok(msgs::ChannelAnnouncement {
5863 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
5864 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
5865 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
5866 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
5867 contents: announcement,
5870 Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
5874 /// Processes an incoming announcement_signatures message, providing a fully-signed
5875 /// channel_announcement message which we can broadcast and storing our counterparty's
5876 /// signatures for later reconstruction/rebroadcast of the channel_announcement.
5877 pub fn announcement_signatures<NS: Deref>(
5878 &mut self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32,
5879 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
5880 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5881 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
// The gossip message hash is the double-SHA256 of the serialized announcement contents.
5883 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
// Verify both counterparty signatures before trusting/storing them.
5885 if self.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.get_counterparty_node_id()).is_err() {
5886 return Err(ChannelError::Close(format!(
5887 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
5888 &announcement, self.get_counterparty_node_id())));
5890 if self.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.counterparty_funding_pubkey()).is_err() {
5891 return Err(ChannelError::Close(format!(
5892 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
5893 &announcement, self.counterparty_funding_pubkey())));
// Store the (verified) sigs BEFORE the confirmation-depth check so they remain usable
// for a later get_signed_channel_announcement even when we return Ignore below.
5896 self.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
5897 if self.funding_tx_confirmation_height == 0 || self.funding_tx_confirmation_height + 5 > best_block_height {
5898 return Err(ChannelError::Ignore(
5899 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
5902 self.sign_channel_announcement(node_signer, announcement)
5905 /// Gets a signed channel_announcement for this channel, if we previously received an
5906 /// announcement_signatures from our counterparty.
5907 pub fn get_signed_channel_announcement<NS: Deref>(
5908 &self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32, user_config: &UserConfig
5909 ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
// Same six-confirmation gate as `announcement_signatures`: with fewer than six
// confirmations we return `None` (the early-return body is elided in this excerpt).
5910 if self.funding_tx_confirmation_height == 0 || self.funding_tx_confirmation_height + 5 > best_block_height {
// Any failure to build the unsigned announcement simply yields `None` rather than an
// error — this is a best-effort getter.
5913 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5915 Err(_) => return None,
// Signing only succeeds if we already hold the counterparty's announcement_signatures.
5917 match self.sign_channel_announcement(node_signer, announcement) {
5918 Ok(res) => Some(res),
/// Builds the `channel_reestablish` message to send to our peer on reconnection.
5923 /// May panic if called on a channel that wasn't immediately-previously
5924 /// self.remove_uncommitted_htlcs_and_mark_paused()'d
5925 pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
// We must currently be marked disconnected, and the channel must have advanced past
// its very first commitment, or these invariants were violated by the caller.
5926 assert_eq!(self.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
5927 assert_ne!(self.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
5928 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
5929 // current to_remote balances. However, it no longer has any use, and thus is now simply
5930 // set to a dummy (but valid, as required by the spec) public key.
5931 // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
5932 // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
5933 // valid, and valid in fuzzing mode's arbitrary validity criteria:
5934 let mut pk = [2; 33]; pk[1] = 0xff;
5935 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
// If the counterparty has revoked at least one commitment, include the last revoked
// per-commitment secret as data_loss_protect; otherwise (else-branch elided in this
// excerpt) an all-zero/default secret is sent.
5936 let remote_last_secret = if self.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
5937 let remote_last_secret = self.commitment_secrets.get_secret(self.cur_counterparty_commitment_transaction_number + 2).unwrap();
5938 log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), log_bytes!(self.channel_id()));
5941 log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", log_bytes!(self.channel_id()));
// Start the "awaiting response" timer so we can detect an unresponsive peer.
5944 self.mark_awaiting_response();
5945 msgs::ChannelReestablish {
5946 channel_id: self.channel_id(),
5947 // The protocol has two different commitment number concepts - the "commitment
5948 // transaction number", which starts from 0 and counts up, and the "revocation key
5949 // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
5950 // commitment transaction numbers by the index which will be used to reveal the
5951 // revocation key for that commitment transaction, which means we have to convert them
5952 // to protocol-level commitment numbers here...
5954 // next_local_commitment_number is the next commitment_signed number we expect to
5955 // receive (indicating if they need to resend one that we missed).
5956 next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number,
5957 // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
5958 // receive, however we track it by the next commitment number for a remote transaction
5959 // (which is one further, as they always revoke previous commitment transaction, not
5960 // the one we send) so we have to decrement by 1. Note that if
5961 // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
5962 // dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
5964 next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.cur_counterparty_commitment_transaction_number - 1,
5965 your_last_per_commitment_secret: remote_last_secret,
5966 my_current_per_commitment_point: dummy_pubkey,
5967 // TODO(dual_funding): If we've sent `commtiment_signed` for an interactive transaction
5968 // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
5969 // txid of that interactive transaction, else we MUST NOT set it.
5970 next_funding_txid: None,
5977 /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
5978 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
5979 /// commitment update.
5981 /// `Err`s will only be [`ChannelError::Ignore`].
5982 pub fn queue_add_htlc<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5983 onion_routing_packet: msgs::OnionPacket, logger: &L)
5984 -> Result<(), ChannelError> where L::Target: Logger {
// Delegate to `send_htlc` with `force_holding_cell = true`; because the HTLC goes to
// the holding cell, `send_htlc` must never return an update message here.
5986 .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true, logger)
5987 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
// On error, assert (in debug builds) that queueing can only fail with `Ignore` —
// anything else would indicate a state-machine bug in `send_htlc`.
5989 if let ChannelError::Ignore(_) = err { /* fine */ }
5990 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
5995 /// Adds a pending outbound HTLC to this channel, note that you probably want
5996 /// [`Self::send_htlc_and_commit`] instead cause you'll want both messages at once.
5998 /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
6000 /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
6001 /// wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
6003 /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
6004 /// we may not yet have sent the previous commitment update messages and will need to
6005 /// regenerate them.
6007 /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
6008 /// on this [`Channel`] if `force_holding_cell` is false.
6010 /// `Err`s will only be [`ChannelError::Ignore`].
6011 fn send_htlc<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
6012 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool, logger: &L)
6013 -> Result<Option<msgs::UpdateAddHTLC>, ChannelError> where L::Target: Logger {
// The channel must be fully ChannelReady with neither side having sent shutdown.
6014 if (self.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
6015 return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
// Sanity-cap the amount at the channel's total value (in msat).
6017 let channel_total_msat = self.channel_value_satoshis * 1000;
6018 if amount_msat > channel_total_msat {
6019 return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
// Zero-value HTLCs are not allowed.
6022 if amount_msat == 0 {
6023 return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
// Enforce the dynamically-computed per-HTLC min/max bounds.
6026 let available_balances = self.get_available_balances();
6027 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
6028 return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
6029 available_balances.next_outbound_htlc_minimum_msat)));
6032 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
6033 return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
6034 available_balances.next_outbound_htlc_limit_msat)));
6037 if (self.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
6038 // Note that this should never really happen, if we're !is_live() on receipt of an
6039 // incoming HTLC for relay will result in us rejecting the HTLC and we won't allow
6040 // the user to send directly into a !is_live() channel. However, if we
6041 // disconnected during the time the previous hop was doing the commitment dance we may
6042 // end up getting here after the forwarding delay. In any case, returning an
6043 // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
6044 return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
// Awaiting an RAA or a monitor update forces the HTLC into the holding cell too.
6047 let need_holding_cell = (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0;
6048 log_debug!(logger, "Pushing new outbound HTLC for {} msat {}", amount_msat,
6049 if force_holding_cell { "into holding cell" }
6050 else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
6051 else { "to peer" });
6053 if need_holding_cell {
6054 force_holding_cell = true;
6057 // Now update local state:
6058 if force_holding_cell {
// Holding-cell path: stash the add and return `Ok(None)` (no message to send now).
// Several struct-literal fields are elided in this excerpt.
6059 self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
6064 onion_routing_packet,
// Direct path: record the pending outbound HTLC as LocalAnnounced...
6069 self.pending_outbound_htlcs.push(OutboundHTLCOutput {
6070 htlc_id: self.next_holder_htlc_id,
6072 payment_hash: payment_hash.clone(),
6074 state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
// ...and build the update_add_htlc message for the peer.
6078 let res = msgs::UpdateAddHTLC {
6079 channel_id: self.channel_id,
6080 htlc_id: self.next_holder_htlc_id,
6084 onion_routing_packet,
// Bump the HTLC id counter only after both records above were created with the old id.
6086 self.next_holder_htlc_id += 1;
// Advances local HTLC/fee state for a newly-sent commitment_signed and produces the
// corresponding ChannelMonitorUpdate carrying the new counterparty commitment info.
6091 fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
6092 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
6093 // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
6094 // fail to generate this, we still are at least at a position where upgrading their status
// Promote inbound HTLCs: AwaitingRemoteRevokeToAnnounce -> AwaitingAnnouncedRemoteRevoke.
6096 for htlc in self.pending_inbound_htlcs.iter_mut() {
6097 let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
6098 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
6100 if let Some(state) = new_state {
6101 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
// Promote outbound HTLCs: AwaitingRemoteRevokeToRemove -> AwaitingRemovedRemoteRevoke.
6105 for htlc in self.pending_outbound_htlcs.iter_mut() {
6106 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
6107 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
6108 // Grab the preimage, if it exists, instead of cloning
6109 let mut reason = OutboundHTLCOutcome::Success(None);
6110 mem::swap(outcome, &mut reason);
6111 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
// An inbound fee update waiting on this commitment becomes our committed feerate.
6114 if let Some((feerate, update_state)) = self.pending_update_fee {
6115 if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
6116 debug_assert!(!self.is_outbound());
6117 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
6118 self.feerate_per_kw = feerate;
6119 self.pending_update_fee = None;
6122 self.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
// Build the counterparty commitment tx (no state change) and box the HTLC sources so
// they can live inside the monitor update.
6124 let (counterparty_commitment_txid, mut htlcs_ref) = self.build_commitment_no_state_update(logger);
6125 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
6126 htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
// A sent (but uncommitted) announcement_signatures is now being committed.
6128 if self.announcement_sigs_state == AnnouncementSigsState::MessageSent {
6129 self.announcement_sigs_state = AnnouncementSigsState::Committed;
// Emit the monitor update describing the latest counterparty commitment transaction.
6132 self.latest_monitor_update_id += 1;
6133 let monitor_update = ChannelMonitorUpdate {
6134 update_id: self.latest_monitor_update_id,
6135 updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
6136 commitment_txid: counterparty_commitment_txid,
6137 htlc_outputs: htlcs.clone(),
6138 commitment_number: self.cur_counterparty_commitment_transaction_number,
6139 their_per_commitment_point: self.counterparty_cur_commitment_point.unwrap()
// We now await the peer's revoke_and_ack before sending further commitments.
6142 self.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
// Builds the current counterparty commitment transaction without mutating channel
// state, returning its txid and the included HTLCs (with their sources).
6146 fn build_commitment_no_state_update<L: Deref>(&self, logger: &L) -> (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>) where L::Target: Logger {
6147 let counterparty_keys = self.build_remote_transaction_keys();
6148 let commitment_stats = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
6149 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
// Test/fuzz-only cross-check: if a projected fee for this commitment was cached when
// we validated the HTLC, assert the now-computed fee matches it exactly.
6151 #[cfg(any(test, fuzzing))]
6153 if !self.is_outbound() {
6154 let projected_commit_tx_info = self.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
6155 *self.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
6156 if let Some(info) = projected_commit_tx_info {
// The cached projection is only valid if nothing relevant changed since it was taken.
6157 let total_pending_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
6158 if info.total_pending_htlcs == total_pending_htlcs
6159 && info.next_holder_htlc_id == self.next_holder_htlc_id
6160 && info.next_counterparty_htlc_id == self.next_counterparty_htlc_id
6161 && info.feerate == self.feerate_per_kw {
6162 let actual_fee = Self::commit_tx_fee_msat(self.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.opt_anchors());
6163 assert_eq!(actual_fee, info.fee);
6169 (counterparty_commitment_txid, commitment_stats.htlcs_included)
6172 /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
6173 /// generation when we shouldn't change HTLC/channel state.
6174 fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
6175 // Get the fee tests from `build_commitment_no_state_update`
6176 #[cfg(any(test, fuzzing))]
6177 self.build_commitment_no_state_update(logger);
// Rebuild the counterparty commitment to sign it (state is deliberately untouched).
6179 let counterparty_keys = self.build_remote_transaction_keys();
6180 let commitment_stats = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
6181 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
6182 let (signature, htlc_signatures);
// Collect the HTLCs included in the commitment for HTLC-tx signing below.
6185 let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
6186 for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
// Ask the signer for the commitment signature plus one signature per HTLC; signer
// refusal maps to `ChannelError::Close`.
6190 let res = self.holder_signer.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.secp_ctx)
6191 .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?;
6193 htlc_signatures = res.1;
6195 log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
6196 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
6197 &counterparty_commitment_txid, encode::serialize_hex(&self.get_funding_redeemscript()),
6198 log_bytes!(signature.serialize_compact()[..]), log_bytes!(self.channel_id()));
// Trace each HTLC transaction we signed, pairing signatures with their HTLCs.
6200 for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
6201 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
6202 encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.get_holder_selected_contest_delay(), htlc, self.opt_anchors(), false, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
6203 encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, self.opt_anchors(), &counterparty_keys)),
6204 log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()),
6205 log_bytes!(htlc_sig.serialize_compact()[..]), log_bytes!(self.channel_id()));
6209 Ok((msgs::CommitmentSigned {
6210 channel_id: self.channel_id,
6214 partial_signature_with_nonce: None,
6215 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
6218 /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
6219 /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
6221 /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
6222 /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
6223 pub fn send_htlc_and_commit<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, logger: &L) -> Result<Option<&ChannelMonitorUpdate>, ChannelError> where L::Target: Logger {
// `force_holding_cell = false`: try to send immediately; only `Ignore` errors are
// legitimate here (debug-asserted), matching `send_htlc`'s contract.
6224 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, false, logger);
6225 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
// HTLC accepted: build the commitment, pause monitor-dependent progress until the
// resulting monitor update is persisted, and hand the update to the caller.
6228 let monitor_update = self.build_commitment_no_status_check(logger);
6229 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
6230 Ok(self.push_ret_blockable_mon_update(monitor_update))
6236 /// Get forwarding information for the counterparty.
6237 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
// Simple clone-out accessor; `None` until a channel_update has been processed.
6238 self.counterparty_forwarding_info.clone()
// Processes the counterparty's channel_update, caching their forwarding parameters.
6241 pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<(), ChannelError> {
// An htlc_minimum at or above the entire channel value is nonsensical: close.
6242 if msg.contents.htlc_minimum_msat >= self.channel_value_satoshis * 1000 {
6243 return Err(ChannelError::Close("Minimum htlc value is greater than channel value".to_string()));
// Store the peer's advertised fee schedule and CLTV delta for forwarding decisions.
6245 self.counterparty_forwarding_info = Some(CounterpartyForwardingInfo {
6246 fee_base_msat: msg.contents.fee_base_msat,
6247 fee_proportional_millionths: msg.contents.fee_proportional_millionths,
6248 cltv_expiry_delta: msg.contents.cltv_expiry_delta
6254 /// Begins the shutdown process, getting a message for the remote peer and returning all
6255 /// holding cell HTLCs for payment failure.
6257 /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no
6258 /// [`ChannelMonitorUpdate`] will be returned).
6259 pub fn get_shutdown<SP: Deref>(&mut self, signer_provider: &SP, their_features: &InitFeatures,
6260 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
6261 -> Result<(msgs::Shutdown, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
6262 where SP::Target: SignerProvider {
// Refuse to start shutdown with any of our HTLCs still un-committed (LocalAnnounced).
6263 for htlc in self.pending_outbound_htlcs.iter() {
6264 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
6265 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
// Reject a second shutdown attempt, distinguishing who started it for the error type.
6268 if self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
6269 if (self.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
6270 return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
6272 else if (self.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
6273 return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
// A script override is only valid if we haven't already committed to one.
6276 if self.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
6277 return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
6279 assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0);
// Cooperative shutdown needs a connected peer and no in-flight monitor update.
6280 if self.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
6281 return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
6284 // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
6285 // script is set, we just force-close and call it a day.
6286 let mut chan_closed = false;
6287 if self.channel_state < ChannelState::FundingSent as u32 {
// Pick the shutdown script: an existing one, the caller's override, or a fresh one
// from the signer; it must be acceptable per the peer's negotiated features.
6291 let update_shutdown_script = match self.shutdown_scriptpubkey {
6293 None if !chan_closed => {
6294 // use override shutdown script if provided
6295 let shutdown_scriptpubkey = match override_shutdown_script {
6296 Some(script) => script,
6298 // otherwise, use the shutdown scriptpubkey provided by the signer
6299 match signer_provider.get_shutdown_scriptpubkey() {
6300 Ok(scriptpubkey) => scriptpubkey,
6301 Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
6305 if !shutdown_scriptpubkey.is_compatible(their_features) {
6306 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6308 self.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
6314 // From here on out, we may not fail!
6315 self.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
// Pre-funding channels jump straight to ShutdownComplete; funded ones record that we
// sent (or will send) our shutdown message.
6316 if self.channel_state < ChannelState::FundingSent as u32 {
6317 self.channel_state = ChannelState::ShutdownComplete as u32;
6319 self.channel_state |= ChannelState::LocalShutdownSent as u32;
6321 self.update_time_counter += 1;
// If we just picked a new shutdown script, persist it to the monitor before sending.
6323 let monitor_update = if update_shutdown_script {
6324 self.latest_monitor_update_id += 1;
6325 let monitor_update = ChannelMonitorUpdate {
6326 update_id: self.latest_monitor_update_id,
6327 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
6328 scriptpubkey: self.get_closing_scriptpubkey(),
6331 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
6332 if self.push_blockable_mon_update(monitor_update) {
6333 self.pending_monitor_updates.last().map(|upd| &upd.update)
6336 let shutdown = msgs::Shutdown {
6337 channel_id: self.channel_id,
6338 scriptpubkey: self.get_closing_scriptpubkey(),
6341 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
6342 // our shutdown until we've committed all of the pending changes.
6343 self.holding_cell_update_fee = None;
// Extract the (source, hash) of every queued AddHTLC so the caller can fail them back.
6344 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
6345 self.holding_cell_htlc_updates.retain(|htlc_update| {
6347 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
6348 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
6355 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
6356 "we can't both complete shutdown and return a monitor update");
6358 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
6361 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
6362 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
6363 /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
6364 /// Also returns the list of payment_hashes for channels which we can safely fail backwards
6365 /// immediately (others we will have to allow to time out).
6366 pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
6367 // Note that we MUST only generate a monitor update that indicates force-closure - we're
6368 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
6369 // being fully configured in some cases. Thus, its likely any monitor events we generate will
6370 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
// Force-closing twice is a caller bug.
6371 assert!(self.channel_state != ChannelState::ShutdownComplete as u32);
6373 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
6374 // return them to fail the payment.
6375 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
6376 let counterparty_node_id = self.get_counterparty_node_id();
6377 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
6379 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
6380 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
6385 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
6386 // If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
6387 // returning a channel monitor update here would imply a channel monitor update before
6388 // we even registered the channel monitor to begin with, which is invalid.
6389 // Thus, if we aren't actually at a point where we could conceivably broadcast the
6390 // funding transaction, don't return a funding txo (which prevents providing the
6391 // monitor update to the user, even if we return one).
6392 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
6393 if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
// CLOSED_CHANNEL_UPDATE_ID marks this as the terminal update for the monitor.
6394 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
6395 Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
6396 update_id: self.latest_monitor_update_id,
6397 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
// Terminal state: no further state-changing calls are allowed on this Channel.
6402 self.channel_state = ChannelState::ShutdownComplete as u32;
6403 self.update_time_counter += 1;
6404 (monitor_update, dropped_outbound_htlcs)
// Iterates over all outbound HTLCs we are responsible for: holding-cell AddHTLC
// entries followed by the already-pending outbound HTLCs.
6407 pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
6408 self.holding_cell_htlc_updates.iter()
6409 .flat_map(|htlc_update| {
// Only AddHTLC holding-cell entries carry a source/hash; others are filtered out.
6411 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
6412 => Some((source, payment_hash)),
6416 .chain(self.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
// Current on-disk serialization version, and the oldest version this code can still read.
6420 const SERIALIZATION_VERSION: u8 = 3;
6421 const MIN_SERIALIZATION_VERSION: u8 = 2;
// TLV-based enum (de)serialization for InboundHTLCRemovalReason; the variant list is
// elided in this excerpt.
6423 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
6429 impl Writeable for ChannelUpdateStatus {
6430 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6431 // We only care about writing out the current state as it was announced, ie only either
6432 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
6433 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
// Staged variants collapse to the last *announced* state, so the payload they carry
// is intentionally dropped on serialization.
6435 ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
6436 ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
6437 ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
6438 ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
6444 impl Readable for ChannelUpdateStatus {
6445 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
// Inverse of the Writeable impl: only the two announced states ever appear on disk.
6446 Ok(match <u8 as Readable>::read(reader)? {
6447 0 => ChannelUpdateStatus::Enabled,
6448 1 => ChannelUpdateStatus::Disabled,
6449 _ => return Err(DecodeError::InvalidValue),
6454 impl Writeable for AnnouncementSigsState {
6455 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6456 // We only care about writing out the current state as if we had just disconnected, at
6457 // which point we always set anything but AnnouncementSigsReceived to NotSent.
// Hence every variant except PeerReceived serializes as 0 (NotSent).
6459 AnnouncementSigsState::NotSent => 0u8.write(writer),
6460 AnnouncementSigsState::MessageSent => 0u8.write(writer),
6461 AnnouncementSigsState::Committed => 0u8.write(writer),
6462 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
6467 impl Readable for AnnouncementSigsState {
6468 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
// Inverse of the Writeable impl: only NotSent (0) and PeerReceived (1) are valid.
6469 Ok(match <u8 as Readable>::read(reader)? {
6470 0 => AnnouncementSigsState::NotSent,
6471 1 => AnnouncementSigsState::PeerReceived,
6472 _ => return Err(DecodeError::InvalidValue),
6477 impl<Signer: WriteableEcdsaChannelSigner> Writeable for Channel<Signer> {
6478 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6479 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
6482 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
6484 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
6485 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
6486 // the low bytes now and the optional high bytes later.
6487 let user_id_low = self.user_id as u64;
6488 user_id_low.write(writer)?;
6490 // Version 1 deserializers expected to read parts of the config object here. Version 2
6491 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
6492 // `minimum_depth` we simply write dummy values here.
6493 writer.write_all(&[0; 8])?;
6495 self.channel_id.write(writer)?;
6496 (self.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?;
6497 self.channel_value_satoshis.write(writer)?;
6499 self.latest_monitor_update_id.write(writer)?;
6501 let mut key_data = VecWriter(Vec::new());
6502 self.holder_signer.write(&mut key_data)?;
6503 assert!(key_data.0.len() < core::usize::MAX);
6504 assert!(key_data.0.len() < core::u32::MAX as usize);
6505 (key_data.0.len() as u32).write(writer)?;
6506 writer.write_all(&key_data.0[..])?;
6508 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
6509 // deserialized from that format.
6510 match self.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
6511 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
6512 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
6514 self.destination_script.write(writer)?;
6516 self.cur_holder_commitment_transaction_number.write(writer)?;
6517 self.cur_counterparty_commitment_transaction_number.write(writer)?;
6518 self.value_to_self_msat.write(writer)?;
6520 let mut dropped_inbound_htlcs = 0;
6521 for htlc in self.pending_inbound_htlcs.iter() {
6522 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
6523 dropped_inbound_htlcs += 1;
6526 (self.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
6527 for htlc in self.pending_inbound_htlcs.iter() {
6528 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
6531 htlc.htlc_id.write(writer)?;
6532 htlc.amount_msat.write(writer)?;
6533 htlc.cltv_expiry.write(writer)?;
6534 htlc.payment_hash.write(writer)?;
6536 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
6537 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
6539 htlc_state.write(writer)?;
6541 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
6543 htlc_state.write(writer)?;
6545 &InboundHTLCState::Committed => {
6548 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
6550 removal_reason.write(writer)?;
6555 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
6557 (self.pending_outbound_htlcs.len() as u64).write(writer)?;
6558 for htlc in self.pending_outbound_htlcs.iter() {
6559 htlc.htlc_id.write(writer)?;
6560 htlc.amount_msat.write(writer)?;
6561 htlc.cltv_expiry.write(writer)?;
6562 htlc.payment_hash.write(writer)?;
6563 htlc.source.write(writer)?;
6565 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
6567 onion_packet.write(writer)?;
6569 &OutboundHTLCState::Committed => {
6572 &OutboundHTLCState::RemoteRemoved(_) => {
6573 // Treat this as a Committed because we haven't received the CS - they'll
		// resend the claim/fail on reconnect as well as (hopefully) the missing CS.
6577 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
6579 if let OutboundHTLCOutcome::Success(preimage) = outcome {
6580 preimages.push(preimage);
6582 let reason: Option<&HTLCFailReason> = outcome.into();
6583 reason.write(writer)?;
6585 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
6587 if let OutboundHTLCOutcome::Success(preimage) = outcome {
6588 preimages.push(preimage);
6590 let reason: Option<&HTLCFailReason> = outcome.into();
6591 reason.write(writer)?;
6596 (self.holding_cell_htlc_updates.len() as u64).write(writer)?;
6597 for update in self.holding_cell_htlc_updates.iter() {
6599 &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet } => {
6601 amount_msat.write(writer)?;
6602 cltv_expiry.write(writer)?;
6603 payment_hash.write(writer)?;
6604 source.write(writer)?;
6605 onion_routing_packet.write(writer)?;
6607 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
6609 payment_preimage.write(writer)?;
6610 htlc_id.write(writer)?;
6612 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
6614 htlc_id.write(writer)?;
6615 err_packet.write(writer)?;
6620 match self.resend_order {
6621 RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
6622 RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
6625 self.monitor_pending_channel_ready.write(writer)?;
6626 self.monitor_pending_revoke_and_ack.write(writer)?;
6627 self.monitor_pending_commitment_signed.write(writer)?;
6629 (self.monitor_pending_forwards.len() as u64).write(writer)?;
6630 for &(ref pending_forward, ref htlc_id) in self.monitor_pending_forwards.iter() {
6631 pending_forward.write(writer)?;
6632 htlc_id.write(writer)?;
6635 (self.monitor_pending_failures.len() as u64).write(writer)?;
6636 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.monitor_pending_failures.iter() {
6637 htlc_source.write(writer)?;
6638 payment_hash.write(writer)?;
6639 fail_reason.write(writer)?;
6642 if self.is_outbound() {
6643 self.pending_update_fee.map(|(a, _)| a).write(writer)?;
6644 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.pending_update_fee {
6645 Some(feerate).write(writer)?;
6647 // As for inbound HTLCs, if the update was only announced and never committed in a
6648 // commitment_signed, drop it.
6649 None::<u32>.write(writer)?;
6651 self.holding_cell_update_fee.write(writer)?;
6653 self.next_holder_htlc_id.write(writer)?;
6654 (self.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
6655 self.update_time_counter.write(writer)?;
6656 self.feerate_per_kw.write(writer)?;
6658 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
6659 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
		// `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
6661 // consider the stale state on reload.
6664 self.funding_tx_confirmed_in.write(writer)?;
6665 self.funding_tx_confirmation_height.write(writer)?;
6666 self.short_channel_id.write(writer)?;
6668 self.counterparty_dust_limit_satoshis.write(writer)?;
6669 self.holder_dust_limit_satoshis.write(writer)?;
6670 self.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
6672 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
6673 self.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
6675 self.counterparty_htlc_minimum_msat.write(writer)?;
6676 self.holder_htlc_minimum_msat.write(writer)?;
6677 self.counterparty_max_accepted_htlcs.write(writer)?;
6679 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
6680 self.minimum_depth.unwrap_or(0).write(writer)?;
6682 match &self.counterparty_forwarding_info {
6685 info.fee_base_msat.write(writer)?;
6686 info.fee_proportional_millionths.write(writer)?;
6687 info.cltv_expiry_delta.write(writer)?;
6689 None => 0u8.write(writer)?
6692 self.channel_transaction_parameters.write(writer)?;
6693 self.funding_transaction.write(writer)?;
6695 self.counterparty_cur_commitment_point.write(writer)?;
6696 self.counterparty_prev_commitment_point.write(writer)?;
6697 self.counterparty_node_id.write(writer)?;
6699 self.counterparty_shutdown_scriptpubkey.write(writer)?;
6701 self.commitment_secrets.write(writer)?;
6703 self.channel_update_status.write(writer)?;
6705 #[cfg(any(test, fuzzing))]
6706 (self.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
6707 #[cfg(any(test, fuzzing))]
6708 for htlc in self.historical_inbound_htlc_fulfills.iter() {
6709 htlc.write(writer)?;
6712 // If the channel type is something other than only-static-remote-key, then we need to have
6713 // older clients fail to deserialize this channel at all. If the type is
6714 // only-static-remote-key, we simply consider it "default" and don't write the channel type
6716 let chan_type = if self.channel_type != ChannelTypeFeatures::only_static_remote_key() {
6717 Some(&self.channel_type) } else { None };
6719 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
6720 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
		// a different percentage of the channel value than 10%, which older versions of LDK used
6722 // to set it to before the percentage was made configurable.
6723 let serialized_holder_selected_reserve =
6724 if self.holder_selected_channel_reserve_satoshis != Self::get_legacy_default_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis)
6725 { Some(self.holder_selected_channel_reserve_satoshis) } else { None };
6727 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
6728 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
6729 let serialized_holder_htlc_max_in_flight =
6730 if self.holder_max_htlc_value_in_flight_msat != Self::get_holder_max_htlc_value_in_flight_msat(self.channel_value_satoshis, &old_max_in_flight_percent_config)
6731 { Some(self.holder_max_htlc_value_in_flight_msat) } else { None };
6733 let channel_pending_event_emitted = Some(self.channel_pending_event_emitted);
6734 let channel_ready_event_emitted = Some(self.channel_ready_event_emitted);
6736 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
6737 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
6738 // we write the high bytes as an option here.
6739 let user_id_high_opt = Some((self.user_id >> 64) as u64);
6741 let holder_max_accepted_htlcs = if self.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.holder_max_accepted_htlcs) };
6743 write_tlv_fields!(writer, {
6744 (0, self.announcement_sigs, option),
6745 // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
6746 // default value instead of being Option<>al. Thus, to maintain compatibility we write
6747 // them twice, once with their original default values above, and once as an option
6748 // here. On the read side, old versions will simply ignore the odd-type entries here,
6749 // and new versions map the default values to None and allow the TLV entries here to
6751 (1, self.minimum_depth, option),
6752 (2, chan_type, option),
6753 (3, self.counterparty_selected_channel_reserve_satoshis, option),
6754 (4, serialized_holder_selected_reserve, option),
6755 (5, self.config, required),
6756 (6, serialized_holder_htlc_max_in_flight, option),
6757 (7, self.shutdown_scriptpubkey, option),
6758 (9, self.target_closing_feerate_sats_per_kw, option),
6759 (11, self.monitor_pending_finalized_fulfills, vec_type),
6760 (13, self.channel_creation_height, required),
6761 (15, preimages, vec_type),
6762 (17, self.announcement_sigs_state, required),
6763 (19, self.latest_inbound_scid_alias, option),
6764 (21, self.outbound_scid_alias, required),
6765 (23, channel_ready_event_emitted, option),
6766 (25, user_id_high_opt, option),
6767 (27, self.channel_keys_id, required),
6768 (28, holder_max_accepted_htlcs, option),
6769 (29, self.temporary_channel_id, option),
6770 (31, channel_pending_event_emitted, option),
6771 (33, self.pending_monitor_updates, vec_type),
// Upper bound on any single up-front buffer allocation made while deserializing a
// `Channel` (e.g. the length-prefixed signer bytes read below), so that a corrupted
// or malicious length prefix cannot make us allocate gigabytes before reading.
const MAX_ALLOC_SIZE: usize = 64*1024;
6779 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<<SP::Target as SignerProvider>::Signer>
6781 ES::Target: EntropySource,
6782 SP::Target: SignerProvider
6784 fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
6785 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
6786 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
6788 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
6789 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
6790 // the low bytes now and the high bytes later.
6791 let user_id_low: u64 = Readable::read(reader)?;
6793 let mut config = Some(LegacyChannelConfig::default());
6795 // Read the old serialization of the ChannelConfig from version 0.0.98.
6796 config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
6797 config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
6798 config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
6799 config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
6801 // Read the 8 bytes of backwards-compatibility ChannelConfig data.
6802 let mut _val: u64 = Readable::read(reader)?;
6805 let channel_id = Readable::read(reader)?;
6806 let channel_state = Readable::read(reader)?;
6807 let channel_value_satoshis = Readable::read(reader)?;
6809 let latest_monitor_update_id = Readable::read(reader)?;
6811 let mut keys_data = None;
6813 // Read the serialize signer bytes. We'll choose to deserialize them or not based on whether
6814 // the `channel_keys_id` TLV is present below.
6815 let keys_len: u32 = Readable::read(reader)?;
6816 keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
6817 while keys_data.as_ref().unwrap().len() != keys_len as usize {
6818 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
6819 let mut data = [0; 1024];
6820 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
6821 reader.read_exact(read_slice)?;
6822 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
6826 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
6827 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
6828 Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
6831 let destination_script = Readable::read(reader)?;
6833 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
6834 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
6835 let value_to_self_msat = Readable::read(reader)?;
6837 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
6839 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
6840 for _ in 0..pending_inbound_htlc_count {
6841 pending_inbound_htlcs.push(InboundHTLCOutput {
6842 htlc_id: Readable::read(reader)?,
6843 amount_msat: Readable::read(reader)?,
6844 cltv_expiry: Readable::read(reader)?,
6845 payment_hash: Readable::read(reader)?,
6846 state: match <u8 as Readable>::read(reader)? {
6847 1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
6848 2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
6849 3 => InboundHTLCState::Committed,
6850 4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
6851 _ => return Err(DecodeError::InvalidValue),
6856 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
6857 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
6858 for _ in 0..pending_outbound_htlc_count {
6859 pending_outbound_htlcs.push(OutboundHTLCOutput {
6860 htlc_id: Readable::read(reader)?,
6861 amount_msat: Readable::read(reader)?,
6862 cltv_expiry: Readable::read(reader)?,
6863 payment_hash: Readable::read(reader)?,
6864 source: Readable::read(reader)?,
6865 state: match <u8 as Readable>::read(reader)? {
6866 0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
6867 1 => OutboundHTLCState::Committed,
6869 let option: Option<HTLCFailReason> = Readable::read(reader)?;
6870 OutboundHTLCState::RemoteRemoved(option.into())
6873 let option: Option<HTLCFailReason> = Readable::read(reader)?;
6874 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
6877 let option: Option<HTLCFailReason> = Readable::read(reader)?;
6878 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
6880 _ => return Err(DecodeError::InvalidValue),
6885 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
6886 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
6887 for _ in 0..holding_cell_htlc_update_count {
6888 holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
6889 0 => HTLCUpdateAwaitingACK::AddHTLC {
6890 amount_msat: Readable::read(reader)?,
6891 cltv_expiry: Readable::read(reader)?,
6892 payment_hash: Readable::read(reader)?,
6893 source: Readable::read(reader)?,
6894 onion_routing_packet: Readable::read(reader)?,
6896 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
6897 payment_preimage: Readable::read(reader)?,
6898 htlc_id: Readable::read(reader)?,
6900 2 => HTLCUpdateAwaitingACK::FailHTLC {
6901 htlc_id: Readable::read(reader)?,
6902 err_packet: Readable::read(reader)?,
6904 _ => return Err(DecodeError::InvalidValue),
6908 let resend_order = match <u8 as Readable>::read(reader)? {
6909 0 => RAACommitmentOrder::CommitmentFirst,
6910 1 => RAACommitmentOrder::RevokeAndACKFirst,
6911 _ => return Err(DecodeError::InvalidValue),
6914 let monitor_pending_channel_ready = Readable::read(reader)?;
6915 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
6916 let monitor_pending_commitment_signed = Readable::read(reader)?;
6918 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
6919 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
6920 for _ in 0..monitor_pending_forwards_count {
6921 monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
6924 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
6925 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
6926 for _ in 0..monitor_pending_failures_count {
6927 monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
6930 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
6932 let holding_cell_update_fee = Readable::read(reader)?;
6934 let next_holder_htlc_id = Readable::read(reader)?;
6935 let next_counterparty_htlc_id = Readable::read(reader)?;
6936 let update_time_counter = Readable::read(reader)?;
6937 let feerate_per_kw = Readable::read(reader)?;
6939 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
6940 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
		// `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
6942 // consider the stale state on reload.
6943 match <u8 as Readable>::read(reader)? {
6946 let _: u32 = Readable::read(reader)?;
6947 let _: u64 = Readable::read(reader)?;
6948 let _: Signature = Readable::read(reader)?;
6950 _ => return Err(DecodeError::InvalidValue),
6953 let funding_tx_confirmed_in = Readable::read(reader)?;
6954 let funding_tx_confirmation_height = Readable::read(reader)?;
6955 let short_channel_id = Readable::read(reader)?;
6957 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
6958 let holder_dust_limit_satoshis = Readable::read(reader)?;
6959 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
6960 let mut counterparty_selected_channel_reserve_satoshis = None;
6962 // Read the old serialization from version 0.0.98.
6963 counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
6965 // Read the 8 bytes of backwards-compatibility data.
6966 let _dummy: u64 = Readable::read(reader)?;
6968 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
6969 let holder_htlc_minimum_msat = Readable::read(reader)?;
6970 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
6972 let mut minimum_depth = None;
6974 // Read the old serialization from version 0.0.98.
6975 minimum_depth = Some(Readable::read(reader)?);
6977 // Read the 4 bytes of backwards-compatibility data.
6978 let _dummy: u32 = Readable::read(reader)?;
6981 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
6983 1 => Some(CounterpartyForwardingInfo {
6984 fee_base_msat: Readable::read(reader)?,
6985 fee_proportional_millionths: Readable::read(reader)?,
6986 cltv_expiry_delta: Readable::read(reader)?,
6988 _ => return Err(DecodeError::InvalidValue),
6991 let channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
6992 let funding_transaction = Readable::read(reader)?;
6994 let counterparty_cur_commitment_point = Readable::read(reader)?;
6996 let counterparty_prev_commitment_point = Readable::read(reader)?;
6997 let counterparty_node_id = Readable::read(reader)?;
6999 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
7000 let commitment_secrets = Readable::read(reader)?;
7002 let channel_update_status = Readable::read(reader)?;
7004 #[cfg(any(test, fuzzing))]
7005 let mut historical_inbound_htlc_fulfills = HashSet::new();
7006 #[cfg(any(test, fuzzing))]
7008 let htlc_fulfills_len: u64 = Readable::read(reader)?;
7009 for _ in 0..htlc_fulfills_len {
7010 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
7014 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
7015 Some((feerate, if channel_parameters.is_outbound_from_holder {
7016 FeeUpdateState::Outbound
7018 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
7024 let mut announcement_sigs = None;
7025 let mut target_closing_feerate_sats_per_kw = None;
7026 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
7027 let mut holder_selected_channel_reserve_satoshis = Some(Self::get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
7028 let mut holder_max_htlc_value_in_flight_msat = Some(Self::get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
7029 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
7030 // only, so we default to that if none was written.
7031 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
7032 let mut channel_creation_height = Some(serialized_height);
7033 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
7035 // If we read an old Channel, for simplicity we just treat it as "we never sent an
7036 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
7037 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
7038 let mut latest_inbound_scid_alias = None;
7039 let mut outbound_scid_alias = None;
7040 let mut channel_pending_event_emitted = None;
7041 let mut channel_ready_event_emitted = None;
7043 let mut user_id_high_opt: Option<u64> = None;
7044 let mut channel_keys_id: Option<[u8; 32]> = None;
7045 let mut temporary_channel_id: Option<[u8; 32]> = None;
7046 let mut holder_max_accepted_htlcs: Option<u16> = None;
7048 let mut pending_monitor_updates = Some(Vec::new());
7050 read_tlv_fields!(reader, {
7051 (0, announcement_sigs, option),
7052 (1, minimum_depth, option),
7053 (2, channel_type, option),
7054 (3, counterparty_selected_channel_reserve_satoshis, option),
7055 (4, holder_selected_channel_reserve_satoshis, option),
7056 (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
7057 (6, holder_max_htlc_value_in_flight_msat, option),
7058 (7, shutdown_scriptpubkey, option),
7059 (9, target_closing_feerate_sats_per_kw, option),
7060 (11, monitor_pending_finalized_fulfills, vec_type),
7061 (13, channel_creation_height, option),
7062 (15, preimages_opt, vec_type),
7063 (17, announcement_sigs_state, option),
7064 (19, latest_inbound_scid_alias, option),
7065 (21, outbound_scid_alias, option),
7066 (23, channel_ready_event_emitted, option),
7067 (25, user_id_high_opt, option),
7068 (27, channel_keys_id, option),
7069 (28, holder_max_accepted_htlcs, option),
7070 (29, temporary_channel_id, option),
7071 (31, channel_pending_event_emitted, option),
7072 (33, pending_monitor_updates, vec_type),
7075 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
7076 let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
7077 // If we've gotten to the funding stage of the channel, populate the signer with its
7078 // required channel parameters.
7079 let non_shutdown_state = channel_state & (!MULTI_STATE_FLAGS);
7080 if non_shutdown_state >= (ChannelState::FundingCreated as u32) {
7081 holder_signer.provide_channel_parameters(&channel_parameters);
7083 (channel_keys_id, holder_signer)
7085 // `keys_data` can be `None` if we had corrupted data.
7086 let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
7087 let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
7088 (holder_signer.channel_keys_id(), holder_signer)
7091 if let Some(preimages) = preimages_opt {
7092 let mut iter = preimages.into_iter();
7093 for htlc in pending_outbound_htlcs.iter_mut() {
7095 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
7096 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7098 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
7099 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7104 // We expect all preimages to be consumed above
7105 if iter.next().is_some() {
7106 return Err(DecodeError::InvalidValue);
7110 let chan_features = channel_type.as_ref().unwrap();
7111 if !chan_features.is_subset(our_supported_features) {
7112 // If the channel was written by a new version and negotiated with features we don't
7113 // understand yet, refuse to read it.
7114 return Err(DecodeError::UnknownRequiredFeature);
7117 let mut secp_ctx = Secp256k1::new();
7118 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
7120 // `user_id` used to be a single u64 value. In order to remain backwards
7121 // compatible with versions prior to 0.0.113, the u128 is serialized as two
7122 // separate u64 values.
7123 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
7125 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
7130 config: config.unwrap(),
7134 // Note that we don't care about serializing handshake limits as we only ever serialize
7135 // channel data after the handshake has completed.
7136 inbound_handshake_limits_override: None,
7139 temporary_channel_id,
7141 announcement_sigs_state: announcement_sigs_state.unwrap(),
7143 channel_value_satoshis,
7145 latest_monitor_update_id,
7148 shutdown_scriptpubkey,
7151 cur_holder_commitment_transaction_number,
7152 cur_counterparty_commitment_transaction_number,
7155 holder_max_accepted_htlcs,
7156 pending_inbound_htlcs,
7157 pending_outbound_htlcs,
7158 holding_cell_htlc_updates,
7162 monitor_pending_channel_ready,
7163 monitor_pending_revoke_and_ack,
7164 monitor_pending_commitment_signed,
7165 monitor_pending_forwards,
7166 monitor_pending_failures,
7167 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
7170 holding_cell_update_fee,
7171 next_holder_htlc_id,
7172 next_counterparty_htlc_id,
7173 update_time_counter,
7176 #[cfg(debug_assertions)]
7177 holder_max_commitment_tx_output: Mutex::new((0, 0)),
7178 #[cfg(debug_assertions)]
7179 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
7181 last_sent_closing_fee: None,
7182 pending_counterparty_closing_signed: None,
7183 closing_fee_limits: None,
7184 target_closing_feerate_sats_per_kw,
7186 inbound_awaiting_accept: false,
7188 funding_tx_confirmed_in,
7189 funding_tx_confirmation_height,
7191 channel_creation_height: channel_creation_height.unwrap(),
7193 counterparty_dust_limit_satoshis,
7194 holder_dust_limit_satoshis,
7195 counterparty_max_htlc_value_in_flight_msat,
7196 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
7197 counterparty_selected_channel_reserve_satoshis,
7198 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
7199 counterparty_htlc_minimum_msat,
7200 holder_htlc_minimum_msat,
7201 counterparty_max_accepted_htlcs,
7204 counterparty_forwarding_info,
7206 channel_transaction_parameters: channel_parameters,
7207 funding_transaction,
7209 counterparty_cur_commitment_point,
7210 counterparty_prev_commitment_point,
7211 counterparty_node_id,
7213 counterparty_shutdown_scriptpubkey,
7217 channel_update_status,
7218 closing_signed_in_flight: false,
7222 #[cfg(any(test, fuzzing))]
7223 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
7224 #[cfg(any(test, fuzzing))]
7225 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
7227 workaround_lnd_bug_4006: None,
7228 sent_message_awaiting_response: None,
7230 latest_inbound_scid_alias,
			// Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if it's missing
7232 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
7234 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
7235 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
7237 #[cfg(any(test, fuzzing))]
7238 historical_inbound_htlc_fulfills,
7240 channel_type: channel_type.unwrap(),
7243 pending_monitor_updates: pending_monitor_updates.unwrap(),
7251 use bitcoin::blockdata::script::{Script, Builder};
7252 use bitcoin::blockdata::transaction::{Transaction, TxOut};
7253 use bitcoin::blockdata::constants::genesis_block;
7254 use bitcoin::blockdata::opcodes;
7255 use bitcoin::network::constants::Network;
7257 use crate::ln::PaymentHash;
7258 use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
7260 use crate::ln::channel::InitFeatures;
7261 use crate::ln::channel::{Channel, InboundHTLCOutput, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator};
7262 use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
7263 use crate::ln::features::ChannelTypeFeatures;
7264 use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
7265 use crate::ln::script::ShutdownScript;
7266 use crate::ln::chan_utils;
7267 use crate::ln::chan_utils::{htlc_success_tx_weight, htlc_timeout_tx_weight};
7268 use crate::chain::BestBlock;
7269 use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
7270 use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
7271 use crate::chain::transaction::OutPoint;
7272 use crate::routing::router::Path;
7273 use crate::util::config::UserConfig;
7274 use crate::util::enforcing_trait_impls::EnforcingSigner;
7275 use crate::util::errors::APIError;
7276 use crate::util::test_utils;
7277 use crate::util::test_utils::OnGetShutdownScriptpubkey;
7278 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
7279 use bitcoin::secp256k1::ffi::Signature as FFISignature;
7280 use bitcoin::secp256k1::{SecretKey,PublicKey};
7281 use bitcoin::hashes::sha256::Hash as Sha256;
7282 use bitcoin::hashes::Hash;
7283 use bitcoin::hash_types::WPubkeyHash;
7284 use bitcoin::PackedLockTime;
7285 use bitcoin::util::address::WitnessVersion;
7286 use crate::prelude::*;
	// Trivial fee estimator for the tests below: reports one fixed feerate.
	struct TestFeeEstimator {
	impl FeeEstimator for TestFeeEstimator {
		// The confirmation target is deliberately ignored — tests want a
		// predictable feerate regardless of target.
		fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
	// Sanity-checks the non-wumbo funding cap against the total bitcoin supply.
	fn test_max_funding_satoshis_no_wumbo() {
		// 21 million BTC expressed in satoshis.
		assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
		// The cap must never exceed the number of satoshis that can ever exist.
		assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
		        "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
	// Regression test: `check_remote_fee` must not overflow on an absurdly high feerate.
	fn test_no_fee_check_overflow() {
		// Previously, calling `check_remote_fee` with a fee of 0xffffffff would overflow in
		// arithmetic, causing a panic with debug assertions enabled.
		let fee_est = TestFeeEstimator { fee_est: 42 };
		let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
		// u32::MAX must now be rejected with an Err, not a panic.
		assert!(Channel::<InMemorySigner>::check_remote_fee(&bounded_fee_estimator,
			u32::max_value(), None, &&test_utils::TestLogger::new()).is_err());
		// The in-memory signer backing the `SignerProvider`/`EntropySource` test impls below.
		signer: InMemorySigner,
	impl EntropySource for Keys {
		// Deterministic "entropy" so test runs are reproducible.
		fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
	impl SignerProvider for Keys {
		type Signer = InMemorySigner;
		// Key id comes straight from the wrapped signer; all inputs are ignored.
		fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
			self.signer.channel_keys_id()
		fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::Signer {
		// Tests never deserialize a signer through this provider, so reaching this is a bug.
		fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::Signer, DecodeError> { panic!(); }
		// P2WPKH destination script built from a fixed test secret key.
		// NOTE(test-only): `unwrap()`s on the hard-coded hex are fine here.
		fn get_destination_script(&self) -> Result<Script, ()> {
			let secp_ctx = Secp256k1::signing_only();
			let channel_monitor_claim_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
			let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
			Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&channel_monitor_claim_key_hash[..]).into_script())
		// P2WPKH shutdown script derived from a fixed test secret key.
		fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
			let secp_ctx = Secp256k1::signing_only();
			let channel_close_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
			Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
	// Test helper: derive the public key for a hex-encoded secret key.
	// Panics (via `unwrap`) on invalid hex or an out-of-range secret, which is
	// acceptable for test-only fixed inputs.
	#[cfg(not(feature = "grind_signatures"))]
	fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
		PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode(hex).unwrap()[..]).unwrap())
// Test that `new_outbound` refuses to open a channel when our signer insists on an
// upfront shutdown script whose segwit version (v16 here) the counterparty does not
// support, because their init features have `shutdown_anysegwit` cleared.
7355 fn upfront_shutdown_script_incompatibility() {
7356 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
7357 let non_v0_segwit_shutdown_script =
7358 ShutdownScript::new_witness_program(WitnessVersion::V16, &[0, 40]).unwrap();
7360 let seed = [42; 32];
7361 let network = Network::Testnet;
7362 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
// Force the keys interface to hand back the non-v0 script defined above.
7363 keys_provider.expect(OnGetShutdownScriptpubkey {
7364 returns: non_v0_segwit_shutdown_script.clone(),
7367 let secp_ctx = Secp256k1::new();
7368 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7369 let config = UserConfig::default();
// Channel creation must fail with `IncompatibleShutdownScript` echoing the bad script.
7370 match Channel::<EnforcingSigner>::new_outbound(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42) {
7371 Err(APIError::IncompatibleShutdownScript { script }) => {
7372 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
7374 Err(e) => panic!("Unexpected error: {:?}", e),
7375 Ok(_) => panic!("Expected error"),
7379 // Check that, during channel creation, we use the same feerate in the open channel message
7380 // as we do in the Channel object creation itself.
7382 fn test_open_channel_msg_fee() {
7383 let original_fee = 253;
7384 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
7385 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
7386 let secp_ctx = Secp256k1::new();
7387 let seed = [42; 32];
7388 let network = Network::Testnet;
7389 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7391 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7392 let config = UserConfig::default();
// Channel is created while the estimator returns `original_fee`...
7393 let node_a_chan = Channel::<EnforcingSigner>::new_outbound(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
7395 // Now change the fee so we can check that the fee in the open_channel message is the
7396 // same as the old fee.
7397 fee_est.fee_est = 500;
// ...so the open_channel message must still advertise the feerate captured at creation,
// not the estimator's new value.
7398 let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
7399 assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
7403 fn test_holder_vs_counterparty_dust_limit() {
7404 // Test that when calculating the local and remote commitment transaction fees, the correct
7405 // dust limits are used.
7406 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
7407 let secp_ctx = Secp256k1::new();
7408 let seed = [42; 32];
7409 let network = Network::Testnet;
7410 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7411 let logger = test_utils::TestLogger::new();
7413 // Go through the flow of opening a channel between two nodes, making sure
7414 // they have different dust limits.
7416 // Create Node A's channel pointing to Node B's pubkey
7417 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7418 let config = UserConfig::default();
7419 let mut node_a_chan = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
7421 // Create Node B's channel by receiving Node A's open_channel message
7422 // Make sure A's dust limit is as we expect.
7423 let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
7424 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
7425 let mut node_b_chan = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
7427 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
7428 let mut accept_channel_msg = node_b_chan.accept_inbound_channel(0);
7429 accept_channel_msg.dust_limit_satoshis = 546;
7430 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
// Override A's own dust limit so A's (1560) and B's (546) limits genuinely differ.
7431 node_a_chan.holder_dust_limit_satoshis = 1560;
7433 // Put some inbound and outbound HTLCs in A's channel.
7434 let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
7435 node_a_chan.pending_inbound_htlcs.push(InboundHTLCOutput {
7437 amount_msat: htlc_amount_msat,
7438 payment_hash: PaymentHash(Sha256::hash(&[42; 32]).into_inner()),
7439 cltv_expiry: 300000000,
7440 state: InboundHTLCState::Committed,
7443 node_a_chan.pending_outbound_htlcs.push(OutboundHTLCOutput {
7445 amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
7446 payment_hash: PaymentHash(Sha256::hash(&[43; 32]).into_inner()),
7447 cltv_expiry: 200000000,
7448 state: OutboundHTLCState::Committed,
7449 source: HTLCSource::OutboundRoute {
7450 path: Path { hops: Vec::new(), blinded_tail: None },
7451 session_priv: SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
7452 first_hop_htlc_msat: 548,
7453 payment_id: PaymentId([42; 32]),
7457 // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
7458 // the dust limit check.
7459 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
7460 let local_commit_tx_fee = node_a_chan.next_local_commit_tx_fee_msat(htlc_candidate, None);
// With all HTLCs dust on A's commitment, the fee must equal a 0-HTLC commitment's fee.
7461 let local_commit_fee_0_htlcs = Channel::<EnforcingSigner>::commit_tx_fee_msat(node_a_chan.feerate_per_kw, 0, node_a_chan.opt_anchors());
7462 assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
7464 // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
7465 // of the HTLCs are seen to be above the dust limit.
7466 node_a_chan.channel_transaction_parameters.is_outbound_from_holder = false;
// On B's commitment (lower dust limit) the same HTLCs are non-dust, so the fee matches
// a 3-HTLC commitment's fee.
7467 let remote_commit_fee_3_htlcs = Channel::<EnforcingSigner>::commit_tx_fee_msat(node_a_chan.feerate_per_kw, 3, node_a_chan.opt_anchors());
7468 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
7469 let remote_commit_tx_fee = node_a_chan.next_remote_commit_tx_fee_msat(htlc_candidate, None);
7470 assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
7474 fn test_timeout_vs_success_htlc_dust_limit() {
7475 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
7476 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
7477 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
7478 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
7479 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
7480 let secp_ctx = Secp256k1::new();
7481 let seed = [42; 32];
7482 let network = Network::Testnet;
7483 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7485 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7486 let config = UserConfig::default();
7487 let mut chan = Channel::<EnforcingSigner>::new_outbound(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
// Reference fees for commitments carrying zero and one non-dust HTLC respectively.
7489 let commitment_tx_fee_0_htlcs = Channel::<EnforcingSigner>::commit_tx_fee_msat(chan.feerate_per_kw, 0, chan.opt_anchors());
7490 let commitment_tx_fee_1_htlc = Channel::<EnforcingSigner>::commit_tx_fee_msat(chan.feerate_per_kw, 1, chan.opt_anchors());
7492 // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
7493 // counted as dust when it shouldn't be.
// Offered (outbound) HTLCs on our commitment use the timeout-tx weight; one sat above the
// effective dust threshold must count as non-dust (1-HTLC fee).
7494 let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.opt_anchors()) / 1000) + chan.holder_dust_limit_satoshis + 1) * 1000;
7495 let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
7496 let commitment_tx_fee = chan.next_local_commit_tx_fee_msat(htlc_candidate, None);
7497 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
7499 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
// Received (inbound) HTLCs on our commitment use the success-tx weight; one sat below the
// effective dust threshold must count as dust (0-HTLC fee).
7500 let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.opt_anchors()) / 1000) + chan.holder_dust_limit_satoshis - 1) * 1000;
7501 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
7502 let commitment_tx_fee = chan.next_local_commit_tx_fee_msat(htlc_candidate, None);
7503 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
// Flip perspective: now evaluate HTLCs against the *counterparty's* commitment, which
// swaps which weight applies and uses the counterparty's dust limit.
7505 chan.channel_transaction_parameters.is_outbound_from_holder = false;
7507 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
7508 let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.opt_anchors()) / 1000) + chan.counterparty_dust_limit_satoshis + 1) * 1000;
7509 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
7510 let commitment_tx_fee = chan.next_remote_commit_tx_fee_msat(htlc_candidate, None);
7511 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
7513 // If swapped: this HTLC would be counted as dust when it shouldn't be.
7514 let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.opt_anchors()) / 1000) + chan.counterparty_dust_limit_satoshis - 1) * 1000;
7515 let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
7516 let commitment_tx_fee = chan.next_remote_commit_tx_fee_msat(htlc_candidate, None);
7517 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
// Open and fund a channel between two freshly-created Channel objects, then disconnect
// both sides and check that each side's channel_reestablish message reports sane
// commitment numbers and an all-zero last per-commitment secret (no updates yet).
7521 fn channel_reestablish_no_updates() {
7522 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
7523 let logger = test_utils::TestLogger::new();
7524 let secp_ctx = Secp256k1::new();
7525 let seed = [42; 32];
7526 let network = Network::Testnet;
7527 let best_block = BestBlock::from_network(network);
7528 let chain_hash = best_block.block_hash();
7529 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7531 // Go through the flow of opening a channel between two nodes.
7533 // Create Node A's channel pointing to Node B's pubkey
7534 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7535 let config = UserConfig::default();
7536 let mut node_a_chan = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
7538 // Create Node B's channel by receiving Node A's open_channel message
7539 let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
7540 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
7541 let mut node_b_chan = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
7543 // Node B --> Node A: accept channel
7544 let accept_channel_msg = node_b_chan.accept_inbound_channel(0);
7545 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
7547 // Node A --> Node B: funding created
7548 let output_script = node_a_chan.get_funding_redeemscript();
// Minimal funding transaction: one output paying the channel's 2-of-2 redeemscript.
7549 let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
7550 value: 10000000, script_pubkey: output_script.clone(),
7552 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
7553 let funding_created_msg = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap();
7554 let (funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).unwrap();
7556 // Node B --> Node A: funding signed
7557 let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger);
7559 // Now disconnect the two nodes and check that the commitment point in
7560 // Node B's channel_reestablish message is sane.
7561 node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger);
7562 let msg = node_b_chan.get_channel_reestablish(&&logger);
7563 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
7564 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
7565 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
7567 // Check that the commitment point in Node A's channel_reestablish message
7569 node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger);
7570 let msg = node_a_chan.get_channel_reestablish(&&logger);
7571 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
7572 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
7573 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
// Verify that `max_inbound_htlc_value_in_flight_percent_of_channel` is honored by both
// `new_outbound` and `new_from_req`, and that out-of-range configs (0% and 101%) are
// clamped to the valid bounds (1% and 100% of the channel value respectively).
7577 fn test_configured_holder_max_htlc_value_in_flight() {
7578 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
7579 let logger = test_utils::TestLogger::new();
7580 let secp_ctx = Secp256k1::new();
7581 let seed = [42; 32];
7582 let network = Network::Testnet;
7583 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7584 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7585 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
7587 let mut config_2_percent = UserConfig::default();
7588 config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
7589 let mut config_99_percent = UserConfig::default();
7590 config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
7591 let mut config_0_percent = UserConfig::default();
7592 config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
7593 let mut config_101_percent = UserConfig::default();
7594 config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
7596 // Test that `new_outbound` creates a channel with the correct value for
7597 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
7598 // which is set to the lower bound + 1 (2%) of the `channel_value`.
7599 let chan_1 = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42).unwrap();
7600 let chan_1_value_msat = chan_1.channel_value_satoshis * 1000;
7601 assert_eq!(chan_1.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
7603 // Test with the upper bound - 1 of valid values (99%).
7604 let chan_2 = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42).unwrap();
7605 let chan_2_value_msat = chan_2.channel_value_satoshis * 1000;
7606 assert_eq!(chan_2.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
7608 let chan_1_open_channel_msg = chan_1.get_open_channel(genesis_block(network).header.block_hash());
7610 // Test that `new_from_req` creates a channel with the correct value for
7611 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
7612 // which is set to the lower bound + 1 (2%) of the `channel_value`.
7613 let chan_3 = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, 42).unwrap();
7614 let chan_3_value_msat = chan_3.channel_value_satoshis * 1000;
7615 assert_eq!(chan_3.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
7617 // Test with the upper bound - 1 of valid values (99%).
7618 let chan_4 = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, 42).unwrap();
7619 let chan_4_value_msat = chan_4.channel_value_satoshis * 1000;
7620 assert_eq!(chan_4.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
7622 // Test that `new_outbound` uses the lower bound of the configurable percentage values (1%)
7623 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
7624 let chan_5 = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42).unwrap();
7625 let chan_5_value_msat = chan_5.channel_value_satoshis * 1000;
7626 assert_eq!(chan_5.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
7628 // Test that `new_outbound` uses the upper bound of the configurable percentage values
7629 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
7631 let chan_6 = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42).unwrap();
7632 let chan_6_value_msat = chan_6.channel_value_satoshis * 1000;
7633 assert_eq!(chan_6.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
7635 // Test that `new_from_req` uses the lower bound of the configurable percentage values (1%)
7636 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
7637 let chan_7 = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, 42).unwrap();
7638 let chan_7_value_msat = chan_7.channel_value_satoshis * 1000;
7639 assert_eq!(chan_7.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
7641 // Test that `new_from_req` uses the upper bound of the configurable percentage values
7642 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
7644 let chan_8 = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, 42).unwrap();
7645 let chan_8_value_msat = chan_8.channel_value_satoshis * 1000;
7646 assert_eq!(chan_8.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
// Drive `test_self_and_counterparty_channel_reserve` through a spread of reserve
// configurations: normal, unreasonably high but valid, below the lower bound, and
// invalid combinations whose sum reaches 100% (negotiation must fail for those).
7650 fn test_configured_holder_selected_channel_reserve_satoshis() {
7652 // Test that `new_outbound` and `new_from_req` create a channel with the correct
7653 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
7654 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
7656 // Test with valid but unreasonably high channel reserves
7657 // Requesting and accepting parties have requested for 49%-49% and 60%-30% channel reserve
7658 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
7659 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
7661 // Test with calculated channel reserve less than lower bound
7662 // i.e `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
7663 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
7665 // Test with invalid channel reserves since sum of both is greater than or equal
7667 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
7668 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
// Helper for the reserve tests above: opens an outbound channel with the requested
// reserve percentage, then has an inbound peer process the open_channel message with its
// own reserve percentage. If the two reserves sum to < 100% of the channel value the
// negotiated reserves must match expectations (floored at MIN_THEIR_CHAN_RESERVE_SATOSHIS);
// otherwise channel acceptance must fail.
7671 fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
7672 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
7673 let logger = test_utils::TestLogger::new();
7674 let secp_ctx = Secp256k1::new();
7675 let seed = [42; 32];
7676 let network = Network::Testnet;
7677 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7678 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7679 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
7682 let mut outbound_node_config = UserConfig::default();
// Config field is expressed in millionths, so scale the fractional percentage up.
7683 outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
7684 let chan = Channel::<EnforcingSigner>::new_outbound(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42).unwrap();
7686 let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
7687 assert_eq!(chan.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
7689 let chan_open_channel_msg = chan.get_open_channel(genesis_block(network).header.block_hash());
7690 let mut inbound_node_config = UserConfig::default();
7691 inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
7693 if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
7694 let chan_inbound_node = Channel::<EnforcingSigner>::new_from_req(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42).unwrap();
7696 let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
7698 assert_eq!(chan_inbound_node.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
7699 assert_eq!(chan_inbound_node.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
7701 // Channel Negotiations failed
7702 let result = Channel::<EnforcingSigner>::new_from_req(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42);
7703 assert!(result.is_err());
// Test that processing a counterparty channel_update stores the counterparty's
// forwarding parameters without touching our own advertised htlc_minimum_msat.
7708 fn channel_update() {
7709 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
7710 let secp_ctx = Secp256k1::new();
7711 let seed = [42; 32];
7712 let network = Network::Testnet;
7713 let chain_hash = genesis_block(network).header.block_hash();
7714 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7716 // Create a channel.
7717 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7718 let config = UserConfig::default();
7719 let mut node_a_chan = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
// No forwarding info should exist before any channel_update is processed.
7720 assert!(node_a_chan.counterparty_forwarding_info.is_none());
7721 assert_eq!(node_a_chan.holder_htlc_minimum_msat, 1); // the default
7722 assert!(node_a_chan.counterparty_forwarding_info().is_none());
7724 // Make sure that receiving a channel update will update the Channel as expected.
7725 let update = ChannelUpdate {
7726 contents: UnsignedChannelUpdate {
7728 short_channel_id: 0,
7731 cltv_expiry_delta: 100,
7732 htlc_minimum_msat: 5,
7733 htlc_maximum_msat: MAX_VALUE_MSAT,
7735 fee_proportional_millionths: 11,
7736 excess_data: Vec::new(),
// Signature is never checked at this layer, so an uninitialized FFI signature is fine.
7738 signature: Signature::from(unsafe { FFISignature::new() })
7740 node_a_chan.channel_update(&update).unwrap();
7742 // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
7743 // change our official htlc_minimum_msat.
7744 assert_eq!(node_a_chan.holder_htlc_minimum_msat, 1);
// The counterparty's advertised forwarding parameters must now be recorded.
7745 match node_a_chan.counterparty_forwarding_info() {
7747 assert_eq!(info.cltv_expiry_delta, 100);
7748 assert_eq!(info.fee_base_msat, 110);
7749 assert_eq!(info.fee_proportional_millionths, 11);
7751 None => panic!("expected counterparty forwarding info to be Some")
7755 #[cfg(feature = "_test_vectors")]
7757 fn outbound_commitment_test() {
7758 use bitcoin::util::sighash;
7759 use bitcoin::consensus::encode::serialize;
7760 use bitcoin::blockdata::transaction::EcdsaSighashType;
7761 use bitcoin::hashes::hex::FromHex;
7762 use bitcoin::hash_types::Txid;
7763 use bitcoin::secp256k1::Message;
7764 use crate::sign::EcdsaChannelSigner;
7765 use crate::ln::PaymentPreimage;
7766 use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
7767 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
7768 use crate::util::logger::Logger;
7769 use crate::sync::Arc;
7771 // Test vectors from BOLT 3 Appendices C and F (anchors):
7772 let feeest = TestFeeEstimator{fee_est: 15000};
7773 let logger : Arc<Logger> = Arc::new(test_utils::TestLogger::new());
7774 let secp_ctx = Secp256k1::new();
7776 let mut signer = InMemorySigner::new(
7778 SecretKey::from_slice(&hex::decode("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
7779 SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
7780 SecretKey::from_slice(&hex::decode("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
7781 SecretKey::from_slice(&hex::decode("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
7782 SecretKey::from_slice(&hex::decode("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
7784 // These aren't set in the test vectors:
7785 [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
7791 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
7792 hex::decode("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
7793 let keys_provider = Keys { signer: signer.clone() };
7795 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7796 let mut config = UserConfig::default();
7797 config.channel_handshake_config.announced_channel = false;
7798 let mut chan = Channel::<InMemorySigner>::new_outbound(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42).unwrap(); // Nothing uses their network key in this test
7799 chan.holder_dust_limit_satoshis = 546;
7800 chan.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
7802 let funding_info = OutPoint{ txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
7804 let counterparty_pubkeys = ChannelPublicKeys {
7805 funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
7806 revocation_basepoint: PublicKey::from_slice(&hex::decode("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap(),
7807 payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
7808 delayed_payment_basepoint: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
7809 htlc_basepoint: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444")
7811 chan.channel_transaction_parameters.counterparty_parameters = Some(
7812 CounterpartyChannelTransactionParameters {
7813 pubkeys: counterparty_pubkeys.clone(),
7814 selected_contest_delay: 144
7816 chan.channel_transaction_parameters.funding_outpoint = Some(funding_info);
7817 signer.provide_channel_parameters(&chan.channel_transaction_parameters);
7819 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
7820 hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
7822 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
7823 hex::decode("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
7825 assert_eq!(counterparty_pubkeys.htlc_basepoint.serialize()[..],
7826 hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
7828 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
7829 // derived from a commitment_seed, so instead we copy it here and call
7830 // build_commitment_transaction.
7831 let delayed_payment_base = &chan.holder_signer.pubkeys().delayed_payment_basepoint;
7832 let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
7833 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
7834 let htlc_basepoint = &chan.holder_signer.pubkeys().htlc_basepoint;
7835 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
7837 macro_rules! test_commitment {
7838 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
7839 chan.channel_transaction_parameters.opt_anchors = None;
7840 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, false, $($remain)*);
7844 macro_rules! test_commitment_with_anchors {
7845 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
7846 chan.channel_transaction_parameters.opt_anchors = Some(());
7847 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, true, $($remain)*);
7851 macro_rules! test_commitment_common {
7852 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
7853 $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
7855 let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
7856 let mut commitment_stats = chan.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
7858 let htlcs = commitment_stats.htlcs_included.drain(..)
7859 .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
7861 (commitment_stats.tx, htlcs)
7863 let trusted_tx = commitment_tx.trust();
7864 let unsigned_tx = trusted_tx.built_transaction();
7865 let redeemscript = chan.get_funding_redeemscript();
7866 let counterparty_signature = Signature::from_der(&hex::decode($counterparty_sig_hex).unwrap()[..]).unwrap();
7867 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.channel_value_satoshis);
7868 log_trace!(logger, "unsigned_tx = {}", hex::encode(serialize(&unsigned_tx.transaction)));
7869 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
7871 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
7872 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
7873 let mut counterparty_htlc_sigs = Vec::new();
7874 counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
7876 let remote_signature = Signature::from_der(&hex::decode($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
7877 per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
7878 counterparty_htlc_sigs.push(remote_signature);
7880 assert_eq!(htlcs.len(), per_htlc.len());
7882 let holder_commitment_tx = HolderCommitmentTransaction::new(
7883 commitment_tx.clone(),
7884 counterparty_signature,
7885 counterparty_htlc_sigs,
7886 &chan.holder_signer.pubkeys().funding_pubkey,
7887 chan.counterparty_funding_pubkey()
7889 let (holder_sig, htlc_sigs) = signer.sign_holder_commitment_and_htlcs(&holder_commitment_tx, &secp_ctx).unwrap();
7890 assert_eq!(Signature::from_der(&hex::decode($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
7892 let funding_redeemscript = chan.get_funding_redeemscript();
7893 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
7894 assert_eq!(serialize(&tx)[..], hex::decode($tx_hex).unwrap()[..], "tx");
7896 // ((htlc, counterparty_sig), (index, holder_sig))
7897 let mut htlc_sig_iter = holder_commitment_tx.htlcs().iter().zip(&holder_commitment_tx.counterparty_htlc_sigs).zip(htlc_sigs.iter().enumerate());
7900 log_trace!(logger, "verifying htlc {}", $htlc_idx);
7901 let remote_signature = Signature::from_der(&hex::decode($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
7903 let ref htlc = htlcs[$htlc_idx];
7904 let htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.feerate_per_kw,
7905 chan.get_counterparty_selected_contest_delay().unwrap(),
7906 &htlc, $opt_anchors, false, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
7907 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
7908 let htlc_sighashtype = if $opt_anchors { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
7909 let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
7910 assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key).is_ok(), "verify counterparty htlc sig");
7912 let mut preimage: Option<PaymentPreimage> = None;
7915 let out = PaymentHash(Sha256::hash(&[i; 32]).into_inner());
7916 if out == htlc.payment_hash {
7917 preimage = Some(PaymentPreimage([i; 32]));
7921 assert!(preimage.is_some());
7924 let htlc_sig = htlc_sig_iter.next().unwrap();
7925 let num_anchors = if $opt_anchors { 2 } else { 0 };
7926 assert_eq!((htlc_sig.0).0.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
7928 let signature = Signature::from_der(&hex::decode($htlc_sig_hex).unwrap()[..]).unwrap();
7929 assert_eq!(signature, *(htlc_sig.1).1, "htlc sig");
7930 let index = (htlc_sig.1).0;
7931 let channel_parameters = chan.channel_transaction_parameters.as_holder_broadcastable();
7932 let trusted_tx = holder_commitment_tx.trust();
7933 log_trace!(logger, "htlc_tx = {}", hex::encode(serialize(&trusted_tx.get_signed_htlc_tx(&channel_parameters, index, &(htlc_sig.0).1, (htlc_sig.1).1, &preimage))));
7934 assert_eq!(serialize(&trusted_tx.get_signed_htlc_tx(&channel_parameters, index, &(htlc_sig.0).1, (htlc_sig.1).1, &preimage))[..],
7935 hex::decode($htlc_tx_hex).unwrap()[..], "htlc tx");
7937 assert!(htlc_sig_iter.next().is_none());
7941 // anchors: simple commitment tx with no HTLCs and single anchor
7942 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
7943 "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
7944 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
7946 // simple commitment tx with no HTLCs
7947 chan.value_to_self_msat = 7000000000;
7949 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
7950 "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
7951 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
7953 // anchors: simple commitment tx with no HTLCs
7954 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
7955 "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
7956 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
7958 chan.pending_inbound_htlcs.push({
7959 let mut out = InboundHTLCOutput{
7961 amount_msat: 1000000,
7963 payment_hash: PaymentHash([0; 32]),
7964 state: InboundHTLCState::Committed,
7966 out.payment_hash.0 = Sha256::hash(&hex::decode("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).into_inner();
7969 chan.pending_inbound_htlcs.push({
7970 let mut out = InboundHTLCOutput{
7972 amount_msat: 2000000,
7974 payment_hash: PaymentHash([0; 32]),
7975 state: InboundHTLCState::Committed,
7977 out.payment_hash.0 = Sha256::hash(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).into_inner();
7980 chan.pending_outbound_htlcs.push({
7981 let mut out = OutboundHTLCOutput{
7983 amount_msat: 2000000,
7985 payment_hash: PaymentHash([0; 32]),
7986 state: OutboundHTLCState::Committed,
7987 source: HTLCSource::dummy(),
7989 out.payment_hash.0 = Sha256::hash(&hex::decode("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).into_inner();
7992 chan.pending_outbound_htlcs.push({
7993 let mut out = OutboundHTLCOutput{
7995 amount_msat: 3000000,
7997 payment_hash: PaymentHash([0; 32]),
7998 state: OutboundHTLCState::Committed,
7999 source: HTLCSource::dummy(),
8001 out.payment_hash.0 = Sha256::hash(&hex::decode("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).into_inner();
8004 chan.pending_inbound_htlcs.push({
8005 let mut out = InboundHTLCOutput{
8007 amount_msat: 4000000,
8009 payment_hash: PaymentHash([0; 32]),
8010 state: InboundHTLCState::Committed,
8012 out.payment_hash.0 = Sha256::hash(&hex::decode("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).into_inner();
8016 // commitment tx with all five HTLCs untrimmed (minimum feerate)
8017 chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8018 chan.feerate_per_kw = 0;
8020 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
8021 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
8022 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8025 "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
8026 "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
8027 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
8030 "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
8031 "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
8032 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8035 "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
8036 "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
8037 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8040 "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
8041 "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
8042 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8045 "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
8046 "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
8047 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8050 // commitment tx with seven outputs untrimmed (maximum feerate)
8051 chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8052 chan.feerate_per_kw = 647;
8054 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
8055 "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
8056 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8059 "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
8060 "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
8061 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
8064 "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
8065 "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
8066 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8069 "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
8070 "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
8071 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8074 "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
8075 "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
8076 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8079 "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
8080 "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
8081 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8084 // commitment tx with six outputs untrimmed (minimum feerate)
8085 chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8086 chan.feerate_per_kw = 648;
8088 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
8089 "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
8090 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8093 "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
8094 "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
8095 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8098 "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
8099 "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
8100 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8103 "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
8104 "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
8105 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8108 "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
8109 "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
8110 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8113 // anchors: commitment tx with six outputs untrimmed (minimum dust limit)
8114 chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8115 chan.feerate_per_kw = 645;
8116 chan.holder_dust_limit_satoshis = 1001;
8118 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
8119 "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
8120 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8123 "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
8124 "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
8125 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
8128 "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
8129 "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
8130 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
8133 "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
8134 "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
8135 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
8138 "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
8139 "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
8140 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8143 // commitment tx with six outputs untrimmed (maximum feerate)
8144 chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8145 chan.feerate_per_kw = 2069;
8146 chan.holder_dust_limit_satoshis = 546;
8148 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
8149 "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
8150 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8153 "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
8154 "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
8155 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8158 "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
8159 "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
8160 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8163 "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
8164 "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
8165 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8168 "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
8169 "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
8170 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8173 // commitment tx with five outputs untrimmed (minimum feerate)
8174 chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8175 chan.feerate_per_kw = 2070;
8177 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
8178 "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
8179 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8182 "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
8183 "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
8184 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8187 "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
8188 "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
8189 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8192 "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
8193 "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
8194 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8197 // commitment tx with five outputs untrimmed (maximum feerate)
8198 chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8199 chan.feerate_per_kw = 2194;
8201 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
8202 "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
8203 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8206 "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
8207 "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
8208 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8211 "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
8212 "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
8213 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8216 "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
8217 "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
8218 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8221 // commitment tx with four outputs untrimmed (minimum feerate)
8222 chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8223 chan.feerate_per_kw = 2195;
8225 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
8226 "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
8227 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8230 "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
8231 "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
8232 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8235 "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
8236 "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
8237 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8240 // anchors: commitment tx with four outputs untrimmed (minimum dust limit)
8241 chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8242 chan.feerate_per_kw = 2185;
8243 chan.holder_dust_limit_satoshis = 2001;
8245 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
8246 "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
8247 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8250 "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
8251 "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
8252 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
8255 "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
8256 "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
8257 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8260 // commitment tx with four outputs untrimmed (maximum feerate)
8261 chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8262 chan.feerate_per_kw = 3702;
8263 chan.holder_dust_limit_satoshis = 546;
8265 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
8266 "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
8267 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8270 "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
8271 "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
8272 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8275 "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
8276 "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
8277 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8280 // commitment tx with three outputs untrimmed (minimum feerate)
8281 chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8282 chan.feerate_per_kw = 3703;
8284 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
8285 "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
8286 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8289 "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
8290 "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
8291 "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8294 // anchors: commitment tx with three outputs untrimmed (minimum dust limit)
8295 chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8296 chan.feerate_per_kw = 3687;
8297 chan.holder_dust_limit_satoshis = 3001;
8299 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
8300 "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
8301 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8304 "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
8305 "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
8306 "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8309 // commitment tx with three outputs untrimmed (maximum feerate)
8310 chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8311 chan.feerate_per_kw = 4914;
8312 chan.holder_dust_limit_satoshis = 546;
8314 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
8315 "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
8316 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8319 "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
8320 "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
8321 "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8324 // commitment tx with two outputs untrimmed (minimum feerate)
8325 chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8326 chan.feerate_per_kw = 4915;
8327 chan.holder_dust_limit_satoshis = 546;
8329 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
8330 "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
8331 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8333 // anchors: commitment tx with two outputs untrimmed (minimum dust limit)
8334 chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8335 chan.feerate_per_kw = 4894;
8336 chan.holder_dust_limit_satoshis = 4001;
8338 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
8339 "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
8340 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8342 // commitment tx with two outputs untrimmed (maximum feerate)
8343 chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8344 chan.feerate_per_kw = 9651180;
8345 chan.holder_dust_limit_satoshis = 546;
8347 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
8348 "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
8349 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8351 // commitment tx with one output untrimmed (minimum feerate)
8352 chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8353 chan.feerate_per_kw = 9651181;
8355 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
8356 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
8357 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8359 // anchors: commitment tx with one output untrimmed (minimum dust limit)
8360 chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8361 chan.feerate_per_kw = 6216010;
8362 chan.holder_dust_limit_satoshis = 4001;
8364 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
8365 "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
8366 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8368 // commitment tx with fee greater than funder amount
8369 chan.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8370 chan.feerate_per_kw = 9651936;
8371 chan.holder_dust_limit_satoshis = 546;
8373 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
8374 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
8375 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8377 // commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
8378 chan.value_to_self_msat = 7_000_000_000 - 2_000_000;
8379 chan.feerate_per_kw = 253;
8380 chan.pending_inbound_htlcs.clear();
8381 chan.pending_inbound_htlcs.push({
8382 let mut out = InboundHTLCOutput{
8384 amount_msat: 2000000,
8386 payment_hash: PaymentHash([0; 32]),
8387 state: InboundHTLCState::Committed,
8389 out.payment_hash.0 = Sha256::hash(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).into_inner();
8392 chan.pending_outbound_htlcs.clear();
8393 chan.pending_outbound_htlcs.push({
8394 let mut out = OutboundHTLCOutput{
8396 amount_msat: 5000001,
8398 payment_hash: PaymentHash([0; 32]),
8399 state: OutboundHTLCState::Committed,
8400 source: HTLCSource::dummy(),
8402 out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
8405 chan.pending_outbound_htlcs.push({
8406 let mut out = OutboundHTLCOutput{
8408 amount_msat: 5000000,
8410 payment_hash: PaymentHash([0; 32]),
8411 state: OutboundHTLCState::Committed,
8412 source: HTLCSource::dummy(),
8414 out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
8418 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
8419 "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
8420 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8423 "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
8424 "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
8425 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8427 "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
8428 "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
8429 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
8431 "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
8432 "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
8433 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
8436 test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
8437 "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
8438 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8441 "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
8442 "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
8443 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
8445 "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
8446 "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
8447 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
8449 "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
8450 "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
8451 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
8456 fn test_per_commitment_secret_gen() {
// Checks chan_utils::build_commitment_secret against the BOLT 3 Appendix D
// "generation tests": each vector is a (32-byte seed, 48-bit index) pair with
// the expected per-commitment secret as a hex string.
8457 // Test vectors from BOLT 3 Appendix D:
8459 let mut seed = [0; 32];
// Vector "generate_from_seed 0 final node": all-zero seed at the maximum
// index (2^48 - 1 = 281474976710655).
8460 seed[0..32].clone_from_slice(&hex::decode("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
8461 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
8462 hex::decode("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);
// Vector "generate_from_seed FF final node": all-0xFF seed at the maximum index.
8464 seed[0..32].clone_from_slice(&hex::decode("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
8465 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
8466 hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);
// Same seed, index with alternating bit pattern 0xaaaaaaaaaaa
// (BOLT 3 "alternate bits 1" vector).
8468 assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
8469 hex::decode("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);
// Same seed, index with the complementary pattern 0x555555555555
// (BOLT 3 "alternate bits 2" vector).
8471 assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
8472 hex::decode("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);
// Vector "generate_from_seed 01 last nontrivial node": repeating-0x01 seed at
// index 1.
8474 seed[0..32].clone_from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
8475 assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
8476 hex::decode("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
8480 fn test_key_derivation() {
// Checks the chan_utils key-derivation helpers against the BOLT 3 Appendix E
// vectors: public/private key derivation from a base key plus a
// per-commitment point, and the corresponding revocation-key derivations.
8481 // Test vectors from BOLT 3 Appendix E:
8482 let secp_ctx = Secp256k1::new();
// Fixed base secret and per-commitment secret from the spec's vectors.
8484 let base_secret = SecretKey::from_slice(&hex::decode("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
8485 let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
// Sanity-check the serialized public keys corresponding to the two secrets.
8487 let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
8488 assert_eq!(base_point.serialize()[..], hex::decode("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);
8490 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
8491 assert_eq!(per_commitment_point.serialize()[..], hex::decode("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
// Public-key derivation: (per_commitment_point, basepoint) -> localpubkey.
8493 assert_eq!(chan_utils::derive_public_key(&secp_ctx, &per_commitment_point, &base_point).serialize()[..],
8494 hex::decode("0235f2dbfaa89b57ec7b055afe29849ef7ddfeb1cefdb9ebdc43f5494984db29e5").unwrap()[..]);
// Matching private-key derivation from the base secret must yield the secret
// for the public key above.
8496 assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
8497 SecretKey::from_slice(&hex::decode("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());
// Revocation public-key derivation (combines per-commitment point and
// revocation basepoint).
8499 assert_eq!(chan_utils::derive_public_revocation_key(&secp_ctx, &per_commitment_point, &base_point).serialize()[..],
8500 hex::decode("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);
// Revocation private key, derivable only once the per-commitment secret is
// known alongside the base secret.
8502 assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
8503 SecretKey::from_slice(&hex::decode("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
8507 fn test_zero_conf_channel_type_support() {
// Verifies that an inbound open_channel whose explicit channel_type sets the
// zero-conf feature bit is accepted by Channel::new_from_req (res.is_ok()).
8508 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8509 let secp_ctx = Secp256k1::new();
8510 let seed = [42; 32];
8511 let network = Network::Testnet;
8512 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8513 let logger = test_utils::TestLogger::new();
// Node A builds an outbound channel only to produce the open_channel message
// that is then mutated and fed to the inbound side below.
8515 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8516 let config = UserConfig::default();
8517 let node_a_chan = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider,
8518 node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
// Request `static_remote_key` plus the zero-conf feature in the explicit
// channel_type of the open_channel message.
8520 let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
8521 channel_type_features.set_zero_conf_required();
8523 let mut open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
8524 open_channel_msg.channel_type = Some(channel_type_features);
8525 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
// The inbound side (using the default provided feature sets) should accept
// the zero-conf channel_type.
8526 let res = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider,
8527 node_b_node_id, &channelmanager::provided_channel_type_features(&config),
8528 &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42);
8529 assert!(res.is_ok());
8534 fn test_supports_anchors_zero_htlc_tx_fee() {
8535 // Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
8536 // resulting `channel_type`.
8537 let secp_ctx = Secp256k1::new();
8538 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8539 let network = Network::Testnet;
8540 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
8541 let logger = test_utils::TestLogger::new();
8543 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
8544 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
// Opt the local config into negotiating anchors-zero-fee-HTLC channels.
8546 let mut config = UserConfig::default();
8547 config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
8549 // It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both
8550 // need to signal it.
// Counterparty features here come from a default config (no anchors
// support), so the negotiated channel_type must not include the feature.
8551 let channel_a = Channel::<EnforcingSigner>::new_outbound(
8552 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
8553 &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
8556 assert!(!channel_a.channel_type.supports_anchors_zero_fee_htlc_tx());
// With both sides advertising support, the expected negotiated type is
// static_remote_key + anchors_zero_fee_htlc_tx.
8558 let mut expected_channel_type = ChannelTypeFeatures::empty();
8559 expected_channel_type.set_static_remote_key_required();
8560 expected_channel_type.set_anchors_zero_fee_htlc_tx_required();
8562 let channel_a = Channel::<EnforcingSigner>::new_outbound(
8563 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
8564 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
// Node B accepts A's open_channel; both ends should settle on the same
// channel_type.
8567 let open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
8568 let channel_b = Channel::<EnforcingSigner>::new_from_req(
8569 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
8570 &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
8571 &open_channel_msg, 7, &config, 0, &&logger, 42
8574 assert_eq!(channel_a.channel_type, expected_channel_type);
8575 assert_eq!(channel_b.channel_type, expected_channel_type);
8580 fn test_rejects_implicit_simple_anchors() {
8581 // Tests that if `option_anchors` is being negotiated implicitly through the intersection of
8582 // each side's `InitFeatures`, it is rejected.
8583 let secp_ctx = Secp256k1::new();
8584 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8585 let network = Network::Testnet;
8586 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
8587 let logger = test_utils::TestLogger::new();
8589 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
8590 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
8592 let config = UserConfig::default();
8594 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
// Hand-roll InitFeatures advertising `static_remote_key` (bit 12) and the
// legacy simple-anchors `option_anchors` (bit 20) — but NOT
// `anchors_zero_fee_htlc_tx`.
8595 let static_remote_key_required: u64 = 1 << 12;
8596 let simple_anchors_required: u64 = 1 << 20;
8597 let raw_init_features = static_remote_key_required | simple_anchors_required;
8598 let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
8600 let channel_a = Channel::<EnforcingSigner>::new_outbound(
8601 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
8602 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
8605 // Set `channel_type` to `None` to force the implicit feature negotiation.
8606 let mut open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
8607 open_channel_msg.channel_type = None;
8609 // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
8610 // `static_remote_key`, it will fail the channel.
8611 let channel_b = Channel::<EnforcingSigner>::new_from_req(
8612 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
8613 &channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
8614 &open_channel_msg, 7, &config, 0, &&logger, 42
8616 assert!(channel_b.is_err());
#[test]
fn test_rejects_simple_anchors_channel_type() {
	// Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
	// it is rejected — both when the counterparty requests it up front and when it attempts a
	// malicious downgrade in `accept_channel`.
	let secp_ctx = Secp256k1::new();
	let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
	let logger = test_utils::TestLogger::new();

	let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
	let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

	let config = UserConfig::default();

	// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
	let static_remote_key_required: u64 = 1 << 12;
	let simple_anchors_required: u64 = 1 << 20;
	let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
	let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
	let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
	// Sanity-check that the legacy `option_anchors` bit really is unknown to LDK's feature set.
	assert!(simple_anchors_init.requires_unknown_bits());
	assert!(simple_anchors_channel_type.requires_unknown_bits());

	// First, we'll try to open a channel between A and B where A requests a channel type for
	// the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
	// B as it's not supported by LDK.
	let channel_a = Channel::<EnforcingSigner>::new_outbound(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
		&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
	).unwrap();

	let mut open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
	open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

	let res = Channel::<EnforcingSigner>::new_from_req(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
		&channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
		&open_channel_msg, 7, &config, 0, &&logger, 42
	);
	assert!(res.is_err());

	// Then, we'll try to open another channel where A requests a channel type for
	// `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
	// original `option_anchors` feature, which should be rejected by A as it's not supported by
	// LDK.
	let mut channel_a = Channel::<EnforcingSigner>::new_outbound(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
		10000000, 100000, 42, &config, 0, 42
	).unwrap();

	let open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());

	let channel_b = Channel::<EnforcingSigner>::new_from_req(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
		&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
		&open_channel_msg, 7, &config, 0, &&logger, 42
	).unwrap();

	let mut accept_channel_msg = channel_b.get_accept_channel_message();
	accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

	// A must refuse the downgraded `accept_channel` rather than proceed with a channel type it
	// never offered.
	let res = channel_a.accept_channel(
		&accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
	);
	assert!(res.is_err());
}