use prelude::*;
use core::{cmp, mem, fmt};
use core::ops::Deref;
-#[cfg(any(test, feature = "fuzztarget"))]
-use std::sync::Mutex;
+#[cfg(any(test, feature = "fuzztarget", debug_assertions))]
+use sync::Mutex;
use bitcoin::hashes::hex::ToHex;
use bitcoin::blockdata::opcodes::all::OP_PUSHBYTES_0;
pub cltv_expiry_delta: u16,
}
+/// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
+/// a description.
+enum UpdateFulfillFetch {
+ NewClaim {
+ monitor_update: ChannelMonitorUpdate,
+ htlc_value_msat: u64,
+ msg: Option<msgs::UpdateFulfillHTLC>,
+ },
+ DuplicateClaim {},
+}
+
+/// The return type of get_update_fulfill_htlc_and_commit.
+pub enum UpdateFulfillCommitFetch {
+ /// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
+ /// it in the holding cell, or re-generated the update_fulfill message after the same claim was
+ /// previously placed in the holding cell (and has since been removed).
+ NewClaim {
+ /// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
+ monitor_update: ChannelMonitorUpdate,
+ /// The value of the HTLC which was claimed, in msat.
+ htlc_value_msat: u64,
+ /// The update_fulfill message and commitment_signed message (if the claim was not placed
+ /// in the holding cell).
+ msgs: Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)>,
+ },
+ /// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
+ /// or has been forgotten (presumably previously claimed).
+ DuplicateClaim {},
+}
+
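+// A hypothetical caller-side sketch of how these variants are meant to be consumed (the
+// names `chan`, `htlc_id`, `payment_preimage` and `logger` are illustrative only):
+//
+//     match chan.get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage, &logger) {
+//         Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, msgs }) => {
+//             // Persist monitor_update first; msgs is None if the claim sits in the holding cell.
+//         },
+//         Ok(UpdateFulfillCommitFetch::DuplicateClaim {}) => { /* nothing new to relay */ },
+//         Err((e, monitor_update)) => { /* persist monitor_update, then handle e */ },
+//     }
+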
// TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
// has been completed, and then turn into a Channel to get compile-time enforcement of things like
// calling channel_id() before we're set up or things like get_outbound_funding_signed on an
// Holder designates channel data owned for the benefit of the user client.
// Counterparty designates channel data owned by the other channel participant entity.
pub(super) struct Channel<Signer: Sign> {
+ #[cfg(any(test, feature = "_test_utils"))]
+ pub(crate) config: ChannelConfig,
+ #[cfg(not(any(test, feature = "_test_utils")))]
config: ChannelConfig,
user_id: u64,
#[cfg(debug_assertions)]
/// Max to_local and to_remote outputs in a locally-generated commitment transaction
- holder_max_commitment_tx_output: ::std::sync::Mutex<(u64, u64)>,
+ holder_max_commitment_tx_output: Mutex<(u64, u64)>,
#[cfg(debug_assertions)]
/// Max to_local and to_remote outputs in a remote-generated commitment transaction
- counterparty_max_commitment_tx_output: ::std::sync::Mutex<(u64, u64)>,
+ counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,
last_sent_closing_fee: Option<(u32, u64, Signature)>, // (feerate, fee, holder_sig)
///
/// See also <https://github.com/lightningnetwork/lnd/issues/4006>
pub workaround_lnd_bug_4006: Option<msgs::FundingLocked>,
+
+ #[cfg(any(test, feature = "fuzztarget"))]
+ // When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
+ // corresponding HTLC on the inbound path. If, then, the outbound path channel is
+ // disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
+ // messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
+ // is fine, but as a sanity check, when we fail to generate the second claim we check here
+ // that the original was a claim and that we aren't now trying to fulfill a failed HTLC.
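+ // (This set is only maintained in test/fuzztarget builds, and is consulted by the
+ // debug_assert!s in get_update_fulfill_htlc and get_update_fail_htlc.)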
+ historical_inbound_htlc_fulfills: HashSet<u64>,
}
#[cfg(any(test, feature = "fuzztarget"))]
monitor_pending_failures: Vec::new(),
#[cfg(debug_assertions)]
- holder_max_commitment_tx_output: ::std::sync::Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
+ holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
#[cfg(debug_assertions)]
- counterparty_max_commitment_tx_output: ::std::sync::Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
+ counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
last_sent_closing_fee: None,
next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
workaround_lnd_bug_4006: None,
+
+ #[cfg(any(test, feature = "fuzztarget"))]
+ historical_inbound_htlc_fulfills: HashSet::new(),
})
}
monitor_pending_failures: Vec::new(),
#[cfg(debug_assertions)]
- holder_max_commitment_tx_output: ::std::sync::Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
+ holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
#[cfg(debug_assertions)]
- counterparty_max_commitment_tx_output: ::std::sync::Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
+ counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
last_sent_closing_fee: None,
next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
workaround_lnd_bug_4006: None,
+
+ #[cfg(any(test, feature = "fuzztarget"))]
+ historical_inbound_htlc_fulfills: HashSet::new(),
};
Ok(chan)
make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
}
- /// Per HTLC, only one get_update_fail_htlc or get_update_fulfill_htlc call may be made.
- /// In such cases we debug_assert!(false) and return a ChannelError::Ignore. Thus, will always
- /// return Ok(_) if debug assertions are turned on or preconditions are met.
- ///
- /// Note that it is still possible to hit these assertions in case we find a preimage on-chain
- /// but then have a reorg which settles on an HTLC-failure on chain.
- fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> Result<(Option<msgs::UpdateFulfillHTLC>, Option<ChannelMonitorUpdate>), ChannelError> where L::Target: Logger {
+ fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
// Either ChannelFunded got set (which means it won't be unset) or there is no way any
// caller thought we could have something claimed (because we wouldn't have accepted an
// incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
// these, but for now we just have to treat them as normal.
let mut pending_idx = core::usize::MAX;
+ let mut htlc_value_msat = 0;
for (idx, htlc) in self.pending_inbound_htlcs.iter().enumerate() {
if htlc.htlc_id == htlc_id_arg {
assert_eq!(htlc.payment_hash, payment_hash_calc);
if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
} else {
log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id()));
+ debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
}
- debug_assert!(false, "Tried to fulfill an HTLC that was already fail/fulfilled");
- return Ok((None, None));
+ return UpdateFulfillFetch::DuplicateClaim {};
},
_ => {
debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
}
}
pending_idx = idx;
+ htlc_value_msat = htlc.amount_msat;
break;
}
}
if pending_idx == core::usize::MAX {
- return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
+ #[cfg(any(test, feature = "fuzztarget"))]
+ // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
+ // this is simply a duplicate claim, not previously failed and we lost funds.
+ debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+ return UpdateFulfillFetch::DuplicateClaim {};
}
// Now update local state:
if htlc_id_arg == htlc_id {
// Make sure we don't leave latest_monitor_update_id incremented here:
self.latest_monitor_update_id -= 1;
- debug_assert!(false, "Tried to fulfill an HTLC that was already fulfilled");
- return Ok((None, None));
+ #[cfg(any(test, feature = "fuzztarget"))]
+ debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+ return UpdateFulfillFetch::DuplicateClaim {};
}
},
&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
// TODO: We may actually be able to switch to a fulfill here, though it's
// rare enough that it may not be worth the complexity burden.
debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
- return Ok((None, Some(monitor_update)));
+ return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
}
},
_ => {}
self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
});
- return Ok((None, Some(monitor_update)));
+ #[cfg(any(test, feature = "fuzztarget"))]
+ self.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
+ return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
}
+ #[cfg(any(test, feature = "fuzztarget"))]
+ self.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
{
let htlc = &mut self.pending_inbound_htlcs[pending_idx];
if let InboundHTLCState::Committed = htlc.state {
} else {
debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
- return Ok((None, Some(monitor_update)));
+ return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
}
log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id));
htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
}
- Ok((Some(msgs::UpdateFulfillHTLC {
- channel_id: self.channel_id(),
- htlc_id: htlc_id_arg,
- payment_preimage: payment_preimage_arg,
- }), Some(monitor_update)))
+ UpdateFulfillFetch::NewClaim {
+ monitor_update,
+ htlc_value_msat,
+ msg: Some(msgs::UpdateFulfillHTLC {
+ channel_id: self.channel_id(),
+ htlc_id: htlc_id_arg,
+ payment_preimage: payment_preimage_arg,
+ }),
+ }
}
- pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> Result<(Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)>, Option<ChannelMonitorUpdate>), ChannelError> where L::Target: Logger {
- match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger)? {
- (Some(update_fulfill_htlc), Some(mut monitor_update)) => {
- let (commitment, mut additional_update) = self.send_commitment_no_status_check(logger)?;
+ pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> Result<UpdateFulfillCommitFetch, (ChannelError, ChannelMonitorUpdate)> where L::Target: Logger {
+ match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
+ UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg: Some(update_fulfill_htlc) } => {
+ let (commitment, mut additional_update) = match self.send_commitment_no_status_check(logger) {
+ Err(e) => return Err((e, monitor_update)),
+ Ok(res) => res
+ };
// send_commitment_no_status_check may bump latest_monitor_update_id but we want them to
// be strictly increasing by one, so decrement it here.
self.latest_monitor_update_id = monitor_update.update_id;
monitor_update.updates.append(&mut additional_update.updates);
- Ok((Some((update_fulfill_htlc, commitment)), Some(monitor_update)))
- },
- (Some(update_fulfill_htlc), None) => {
- let (commitment, monitor_update) = self.send_commitment_no_status_check(logger)?;
- Ok((Some((update_fulfill_htlc, commitment)), Some(monitor_update)))
+ Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, msgs: Some((update_fulfill_htlc, commitment)) })
},
- (None, Some(monitor_update)) => Ok((None, Some(monitor_update))),
- (None, None) => Ok((None, None))
+ UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None } =>
+ Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, msgs: None }),
+ UpdateFulfillFetch::DuplicateClaim {} => Ok(UpdateFulfillCommitFetch::DuplicateClaim {}),
}
}
- /// Per HTLC, only one get_update_fail_htlc or get_update_fulfill_htlc call may be made.
- /// In such cases we debug_assert!(false) and return a ChannelError::Ignore. Thus, will always
- /// return Ok(_) if debug assertions are turned on or preconditions are met.
- ///
- /// Note that it is still possible to hit these assertions in case we find a preimage on-chain
- /// but then have a reorg which settles on an HTLC-failure on chain.
+ /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
+ /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
+ /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
+ /// before we fail backwards.
+ /// If we do fail twice, we debug_assert!(false) and return Ok(None). Thus, this will always
+ /// return Ok(_) if debug assertions are turned on or preconditions are met.
pub fn get_update_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L) -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
panic!("Was asked to fail an HTLC when channel was not in an operational state");
if htlc.htlc_id == htlc_id_arg {
match htlc.state {
InboundHTLCState::Committed => {},
- InboundHTLCState::LocalRemoved(_) => {
- debug_assert!(false, "Tried to fail an HTLC that was already fail/fulfilled");
+ InboundHTLCState::LocalRemoved(ref reason) => {
+ if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
+ } else {
+ debug_assert!(false, "Tried to fail an HTLC that was already failed");
+ }
return Ok(None);
},
_ => {
}
}
if pending_idx == core::usize::MAX {
- return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
+ #[cfg(any(test, feature = "fuzztarget"))]
+ // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
+ // is simply a duplicate fail, not previously failed and we failed-back too early.
+ debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+ return Ok(None);
}
// Now update local state:
match pending_update {
&HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
if htlc_id_arg == htlc_id {
- debug_assert!(false, "Tried to fail an HTLC that was already fulfilled");
- return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
+ #[cfg(any(test, feature = "fuzztarget"))]
+ debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+ return Ok(None);
}
},
&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
/// Marks an outbound HTLC for which we have received an update_fail/fulfill/malformed message.
#[inline]
- fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentHash>, fail_reason: Option<HTLCFailReason>) -> Result<&HTLCSource, ChannelError> {
+ fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentHash>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
for htlc in self.pending_outbound_htlcs.iter_mut() {
if htlc.htlc_id == htlc_id {
match check_preimage {
OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
}
- return Ok(&htlc.source);
+ return Ok(htlc);
}
}
Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
}
- pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<HTLCSource, ChannelError> {
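+ /// On success, returns the source of the fulfilled HTLC along with its amount in msat.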
+ pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
}
}
let payment_hash = PaymentHash(Sha256::hash(&msg.payment_preimage.0[..]).into_inner());
- self.mark_outbound_htlc_removed(msg.htlc_id, Some(payment_hash), None).map(|source| source.clone())
+ self.mark_outbound_htlc_removed(msg.htlc_id, Some(payment_hash), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
}
pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
}
},
&HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
- match self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) {
- Ok((update_fulfill_msg_option, additional_monitor_update_opt)) => {
- update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap());
- if let Some(mut additional_monitor_update) = additional_monitor_update_opt {
- monitor_update.updates.append(&mut additional_monitor_update.updates);
- }
- },
- Err(e) => {
- if let ChannelError::Ignore(_) = e {}
- else {
- panic!("Got a non-IgnoreError action trying to fulfill holding cell HTLC");
- }
- }
- }
+ // If an HTLC claim was previously added to the holding cell (via
+ // `get_update_fulfill_htlc`) then generating the claim message itself must
+ // not fail - any in-between attempts to claim the HTLC will have resulted
+ // in it hitting the holding cell again and we cannot change the state of a
+ // holding cell HTLC from fulfill to anything else.
+ let (update_fulfill_msg_option, mut additional_monitor_update) =
+ if let UpdateFulfillFetch::NewClaim { msg, monitor_update, .. } = self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) {
+ (msg, monitor_update)
+ } else { unreachable!() };
+ update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap());
+ monitor_update.updates.append(&mut additional_monitor_update.updates);
},
&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
match self.get_update_fail_htlc(htlc_id, err_packet.clone(), logger) {
- Ok(update_fail_msg_option) => update_fail_htlcs.push(update_fail_msg_option.unwrap()),
+ Ok(update_fail_msg_option) => {
+ // If an HTLC failure was previously added to the holding cell (via
+ // `get_update_fail_htlc`) then generating the fail message itself
+ // must not fail - we should never end up in a state where we
+ // double-fail an HTLC or fail-then-claim an HTLC as it indicates
+ // we didn't wait for a full revocation before failing.
+ update_fail_htlcs.push(update_fail_msg_option.unwrap())
+ },
Err(e) => {
if let ChannelError::Ignore(_) = e {}
else {
if !self.is_outbound() {
// Check that we won't violate the remote channel reserve by adding this HTLC.
let counterparty_balance_msat = self.channel_value_satoshis * 1000 - self.value_to_self_msat;
- let holder_selected_chan_reserve_msat = Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis);
+ let holder_selected_chan_reserve_msat = Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis) * 1000;
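+ // get_holder_selected_channel_reserve_satoshis returns a value in satoshis; scale it
+ // by 1000 so the reserve check below compares msat against msat.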
let htlc_candidate = HTLCCandidate::new(amount_msat, HTLCInitiator::LocalOffered);
let counterparty_commit_tx_fee_msat = self.next_remote_commit_tx_fee_msat(htlc_candidate, None);
if counterparty_balance_msat < holder_selected_chan_reserve_msat + counterparty_commit_tx_fee_msat {
self.counterparty_dust_limit_satoshis.write(writer)?;
self.holder_dust_limit_satoshis.write(writer)?;
self.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
+
+ // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
self.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
+
self.counterparty_htlc_minimum_msat.write(writer)?;
self.holder_htlc_minimum_msat.write(writer)?;
self.counterparty_max_accepted_htlcs.write(writer)?;
+
+ // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
self.minimum_depth.unwrap_or(0).write(writer)?;
match &self.counterparty_forwarding_info {
self.channel_update_status.write(writer)?;
+ #[cfg(any(test, feature = "fuzztarget"))]
+ (self.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
+ #[cfg(any(test, feature = "fuzztarget"))]
+ for htlc in self.historical_inbound_htlc_fulfills.iter() {
+ htlc.write(writer)?;
+ }
+
write_tlv_fields!(writer, {
(0, self.announcement_sigs, option),
// minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
let counterparty_dust_limit_satoshis = Readable::read(reader)?;
let holder_dust_limit_satoshis = Readable::read(reader)?;
let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
- let mut counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
- if counterparty_selected_channel_reserve_satoshis == Some(0) {
- // Versions up to 0.0.98 had counterparty_selected_channel_reserve_satoshis as a
- // non-option, writing 0 for what we now consider None.
- counterparty_selected_channel_reserve_satoshis = None;
+ let mut counterparty_selected_channel_reserve_satoshis = None;
+ if ver == 1 {
+ // Read the old serialization from version 0.0.98.
+ counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
+ } else {
+ // Read the 8 bytes of backwards-compatibility data.
+ let _dummy: u64 = Readable::read(reader)?;
}
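+ // For newer serialization versions the real value, if any, is carried in the TLV
+ // stream instead (see the matching write-side note about 0.0.99+ above).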
let counterparty_htlc_minimum_msat = Readable::read(reader)?;
let holder_htlc_minimum_msat = Readable::read(reader)?;
let counterparty_max_accepted_htlcs = Readable::read(reader)?;
- let mut minimum_depth = Some(Readable::read(reader)?);
- if minimum_depth == Some(0) {
- // Versions up to 0.0.98 had minimum_depth as a non-option, writing 0 for what we now
- // consider None.
- minimum_depth = None;
+
+ let mut minimum_depth = None;
+ if ver == 1 {
+ // Read the old serialization from version 0.0.98.
+ minimum_depth = Some(Readable::read(reader)?);
+ } else {
+ // Read the 4 bytes of backwards-compatibility data.
+ let _dummy: u32 = Readable::read(reader)?;
}
let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
let channel_update_status = Readable::read(reader)?;
+ #[cfg(any(test, feature = "fuzztarget"))]
+ let mut historical_inbound_htlc_fulfills = HashSet::new();
+ #[cfg(any(test, feature = "fuzztarget"))]
+ {
+ let htlc_fulfills_len: u64 = Readable::read(reader)?;
+ for _ in 0..htlc_fulfills_len {
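+ // HashSet::insert returns false if the element was already present, so the assert
+ // catches duplicate HTLC IDs in the serialized data.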
+ assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
+ }
+ }
+
let mut announcement_sigs = None;
read_tlv_fields!(reader, {
(0, announcement_sigs, option),
feerate_per_kw,
#[cfg(debug_assertions)]
- holder_max_commitment_tx_output: ::std::sync::Mutex::new((0, 0)),
+ holder_max_commitment_tx_output: Mutex::new((0, 0)),
#[cfg(debug_assertions)]
- counterparty_max_commitment_tx_output: ::std::sync::Mutex::new((0, 0)),
+ counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
last_sent_closing_fee,
next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
workaround_lnd_bug_4006: None,
+
+ #[cfg(any(test, feature = "fuzztarget"))]
+ historical_inbound_htlc_fulfills,
})
}
}
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::Hash;
use bitcoin::hash_types::{Txid, WPubkeyHash};
- use std::sync::Arc;
+ use sync::Arc;
use prelude::*;
struct TestFeeEstimator {