X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannel.rs;h=10641d0abbff75aeba310bcdffc1455db54dd18e;hb=0dfcacd22c23f69b6526c9c6507d21427a2b7ccb;hp=0d5ffb038c9585853270e507b4664b0d3e60efaf;hpb=a0a3a6b204395a63647a7914f35077b117315b1d;p=rust-lightning

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 0d5ffb03..10641d0a 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -41,6 +41,7 @@ use util::errors::APIError;
 use util::config::{UserConfig,ChannelConfig};
 use util::scid_utils::scid_from_parts;
 
+use io;
 use prelude::*;
 use core::{cmp,mem,fmt};
 use core::ops::Deref;
@@ -301,6 +302,33 @@ pub struct CounterpartyForwardingInfo {
 	pub cltv_expiry_delta: u16,
 }
 
+/// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
+/// a description.
+enum UpdateFulfillFetch {
+	NewClaim {
+		monitor_update: ChannelMonitorUpdate,
+		msg: Option<msgs::UpdateFulfillHTLC>,
+	},
+	DuplicateClaim {},
+}
+
+/// The return type of get_update_fulfill_htlc_and_commit.
+pub enum UpdateFulfillCommitFetch {
+	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
+	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
+	/// previously placed in the holding cell (and has since been removed).
+	NewClaim {
+		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
+		monitor_update: ChannelMonitorUpdate,
+		/// The update_fulfill message and commitment_signed message (if the claim was not placed
+		/// in the holding cell).
+		msgs: Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)>,
+	},
+	/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
+	/// or has been forgotten (presumably previously claimed).
+	DuplicateClaim {},
+}
+
 // TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
 // has been completed, and then turn into a Channel to get compiler-time enforcement of things like
 // calling channel_id() before we're set up or things like get_outbound_funding_signed on an
@@ -444,6 +472,15 @@ pub(super) struct Channel<Signer: Sign> {
 	///
 	/// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
 	pub workaround_lnd_bug_4006: Option<msgs::FundingLocked>,
+
+	#[cfg(any(test, feature = "fuzztarget"))]
+	// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
+	// corresponding HTLC on the inbound path. If, then, the outbound path channel is
+	// disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
+	// messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
+	// is fine, but as a sanity check, when we fail to generate the second claim we check here
+	// that the original was a claim and that we aren't now trying to fulfill a failed HTLC.
+	historical_inbound_htlc_fulfills: HashSet<u64>,
 }
 
 #[cfg(any(test, feature = "fuzztarget"))]
@@ -644,6 +681,9 @@ impl<Signer: Sign> Channel<Signer> {
 			next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
 
 			workaround_lnd_bug_4006: None,
+
+			#[cfg(any(test, feature = "fuzztarget"))]
+			historical_inbound_htlc_fulfills: HashSet::new(),
 		})
 	}
 
@@ -889,6 +929,9 @@ impl<Signer: Sign> Channel<Signer> {
 			next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
 
 			workaround_lnd_bug_4006: None,
+
+			#[cfg(any(test, feature = "fuzztarget"))]
+			historical_inbound_htlc_fulfills: HashSet::new(),
 		};
 
 		Ok(chan)
@@ -1216,13 +1259,7 @@ impl<Signer: Sign> Channel<Signer> {
 		make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
 	}
 
-	/// Per HTLC, only one get_update_fail_htlc or get_update_fulfill_htlc call may be made.
-	/// In such cases we debug_assert!(false) and return a ChannelError::Ignore. Thus, will always
-	/// return Ok(_) if debug assertions are turned on or preconditions are met.
-	///
-	/// Note that it is still possible to hit these assertions in case we find a preimage on-chain
-	/// but then have a reorg which settles on an HTLC-failure on chain.
-	fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> Result<(Option<msgs::UpdateFulfillHTLC>, Option<ChannelMonitorUpdate>), ChannelError> where L::Target: Logger {
+	fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
 		// Either ChannelFunded got set (which means it won't be unset) or there is no way any
 		// caller thought we could have something claimed (cause we wouldn't have accepted in an
 		// incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
@@ -1248,9 +1285,9 @@ impl<Signer: Sign> Channel<Signer> {
 						if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
 						} else {
 							log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id()));
+							debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
 						}
-						debug_assert!(false, "Tried to fulfill an HTLC that was already fail/fulfilled");
-						return Ok((None, None));
+						return UpdateFulfillFetch::DuplicateClaim {};
 					},
 					_ => {
 						debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
@@ -1262,7 +1299,11 @@ impl<Signer: Sign> Channel<Signer> {
 			}
 		}
 		if pending_idx == core::usize::MAX {
-			return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
+			#[cfg(any(test, feature = "fuzztarget"))]
+			// If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
+			// this is simply a duplicate claim, not previously failed and we lost funds.
+			debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+			return UpdateFulfillFetch::DuplicateClaim {};
 		}
 
 		// Now update local state:
@@ -1284,8 +1325,9 @@ impl<Signer: Sign> Channel<Signer> {
 					if htlc_id_arg == htlc_id {
 						// Make sure we don't leave latest_monitor_update_id incremented here:
 						self.latest_monitor_update_id -= 1;
-						debug_assert!(false, "Tried to fulfill an HTLC that was already fulfilled");
-						return Ok((None, None));
+						#[cfg(any(test, feature = "fuzztarget"))]
+						debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+						return UpdateFulfillFetch::DuplicateClaim {};
 					}
 				},
 				&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
@@ -1294,7 +1336,7 @@ impl<Signer: Sign> Channel<Signer> {
 						// TODO: We may actually be able to switch to a fulfill here, though its
 						// rare enough it may not be worth the complexity burden.
 						debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
-						return Ok((None, Some(monitor_update)));
+						return UpdateFulfillFetch::NewClaim { monitor_update, msg: None };
 					}
 				},
 				_ => {}
@@ -1304,52 +1346,58 @@ impl<Signer: Sign> Channel<Signer> {
 			self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
 				payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
 			});
-			return Ok((None, Some(monitor_update)));
+			#[cfg(any(test, feature = "fuzztarget"))]
+			self.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
+			return UpdateFulfillFetch::NewClaim { monitor_update, msg: None };
 		}
+		#[cfg(any(test, feature = "fuzztarget"))]
+		self.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
 
 		{
 			let htlc = &mut self.pending_inbound_htlcs[pending_idx];
 			if let InboundHTLCState::Committed = htlc.state {
 			} else {
 				debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
-				return Ok((None, Some(monitor_update)));
+				return UpdateFulfillFetch::NewClaim { monitor_update, msg: None };
 			}
 			log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id));
 			htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
 		}
 
-		Ok((Some(msgs::UpdateFulfillHTLC {
-			channel_id: self.channel_id(),
-			htlc_id: htlc_id_arg,
-			payment_preimage: payment_preimage_arg,
-		}), Some(monitor_update)))
+		UpdateFulfillFetch::NewClaim {
+			monitor_update,
+			msg: Some(msgs::UpdateFulfillHTLC {
+				channel_id: self.channel_id(),
+				htlc_id: htlc_id_arg,
+				payment_preimage: payment_preimage_arg,
+			}),
+		}
 	}
 
-	pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> Result<(Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)>, Option<ChannelMonitorUpdate>), ChannelError> where L::Target: Logger {
-		match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger)? {
-			(Some(update_fulfill_htlc), Some(mut monitor_update)) => {
-				let (commitment, mut additional_update) = self.send_commitment_no_status_check(logger)?;
+	pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> Result<UpdateFulfillCommitFetch, (ChannelError, ChannelMonitorUpdate)> where L::Target: Logger {
+		match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
+			UpdateFulfillFetch::NewClaim { mut monitor_update, msg: Some(update_fulfill_htlc) } => {
+				let (commitment, mut additional_update) = match self.send_commitment_no_status_check(logger) {
+					Err(e) => return Err((e, monitor_update)),
+					Ok(res) => res
+				};
 				// send_commitment_no_status_check may bump latest_monitor_id but we want them to be
 				// strictly increasing by one, so decrement it here.
 				self.latest_monitor_update_id = monitor_update.update_id;
 				monitor_update.updates.append(&mut additional_update.updates);
 
-				Ok((Some((update_fulfill_htlc, commitment)), Some(monitor_update)))
-			},
-			(Some(update_fulfill_htlc), None) => {
-				let (commitment, monitor_update) = self.send_commitment_no_status_check(logger)?;
-				Ok((Some((update_fulfill_htlc, commitment)), Some(monitor_update)))
+				Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, msgs: Some((update_fulfill_htlc, commitment)) })
 			},
-			(None, Some(monitor_update)) => Ok((None, Some(monitor_update))),
-			(None, None) => Ok((None, None))
+			UpdateFulfillFetch::NewClaim { monitor_update, msg: None } => Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, msgs: None }),
+			UpdateFulfillFetch::DuplicateClaim {} => Ok(UpdateFulfillCommitFetch::DuplicateClaim {}),
 		}
 	}
 
-	/// Per HTLC, only one get_update_fail_htlc or get_update_fulfill_htlc call may be made.
-	/// In such cases we debug_assert!(false) and return a ChannelError::Ignore. Thus, will always
-	/// return Ok(_) if debug assertions are turned on or preconditions are met.
-	///
-	/// Note that it is still possible to hit these assertions in case we find a preimage on-chain
-	/// but then have a reorg which settles on an HTLC-failure on chain.
+	/// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
+	/// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
+	/// however, fail more than once as we wait for an upstream failure to be irrevocably committed
+	/// before we fail backwards.
+	/// If we do fail twice, we debug_assert!(false) and return Ok(None). Thus, this will always
+	/// return Ok(_) if debug assertions are turned on or preconditions are met.
 	pub fn get_update_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L) -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
 		if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
 			panic!("Was asked to fail an HTLC when channel was not in an operational state");
@@ -1365,8 +1413,11 @@ impl<Signer: Sign> Channel<Signer> {
 			if htlc.htlc_id == htlc_id_arg {
 				match htlc.state {
 					InboundHTLCState::Committed => {},
-					InboundHTLCState::LocalRemoved(_) => {
-						debug_assert!(false, "Tried to fail an HTLC that was already fail/fulfilled");
+					InboundHTLCState::LocalRemoved(ref reason) => {
+						if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
+						} else {
+							debug_assert!(false, "Tried to fail an HTLC that was already failed");
+						}
 						return Ok(None);
 					},
 					_ => {
@@ -1378,7 +1429,11 @@ impl<Signer: Sign> Channel<Signer> {
 			}
 		}
 		if pending_idx == core::usize::MAX {
-			return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
+			#[cfg(any(test, feature = "fuzztarget"))]
+			// If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
+			// is simply a duplicate fail, not previously failed and we failed-back too early.
+			debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+			return Ok(None);
 		}
 
 		// Now update local state:
@@ -1387,8 +1442,9 @@ impl<Signer: Sign> Channel<Signer> {
 			match pending_update {
 				&HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
 					if htlc_id_arg == htlc_id {
-						debug_assert!(false, "Tried to fail an HTLC that was already fulfilled");
-						return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
+						#[cfg(any(test, feature = "fuzztarget"))]
+						debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+						return Ok(None);
 					}
 				},
 				&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
@@ -2435,24 +2491,28 @@ impl<Signer: Sign> Channel<Signer> {
 						}
 					},
 					&HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
-						match self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) {
-							Ok((update_fulfill_msg_option, additional_monitor_update_opt)) => {
-								update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap());
-								if let Some(mut additional_monitor_update) = additional_monitor_update_opt {
-									monitor_update.updates.append(&mut additional_monitor_update.updates);
-								}
-							},
-							Err(e) => {
-								if let ChannelError::Ignore(_) = e {}
-								else {
-									panic!("Got a non-IgnoreError action trying to fulfill holding cell HTLC");
-								}
-							}
-						}
+						// If an HTLC claim was previously added to the holding cell (via
+						// `get_update_fulfill_htlc`), then generating the claim message itself must
+						// not fail - any in-between attempts to claim the HTLC will have resulted
+						// in it hitting the holding cell again and we cannot change the state of a
+						// holding cell HTLC from fulfill to anything else.
+						let (update_fulfill_msg_option, mut additional_monitor_update) =
+							if let UpdateFulfillFetch::NewClaim { msg, monitor_update } = self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) {
+								(msg, monitor_update)
+							} else { unreachable!() };
+						update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap());
+						monitor_update.updates.append(&mut additional_monitor_update.updates);
 					},
 					&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
 						match self.get_update_fail_htlc(htlc_id, err_packet.clone(), logger) {
-							Ok(update_fail_msg_option) => update_fail_htlcs.push(update_fail_msg_option.unwrap()),
+							Ok(update_fail_msg_option) => {
+								// If an HTLC failure was previously added to the holding cell (via
+								// `get_update_fail_htlc`) then generating the fail message itself
+								// must not fail - we should never end up in a state where we
+								// double-fail an HTLC or fail-then-claim an HTLC as it indicates
+								// we didn't wait for a full revocation before failing.
+								update_fail_htlcs.push(update_fail_msg_option.unwrap())
+							},
 							Err(e) => {
 								if let ChannelError::Ignore(_) = e {}
 								else {
@@ -4087,7 +4147,7 @@ impl<Signer: Sign> Channel<Signer> {
 		if !self.is_outbound() {
 			// Check that we won't violate the remote channel reserve by adding this HTLC.
 			let counterparty_balance_msat = self.channel_value_satoshis * 1000 - self.value_to_self_msat;
-			let holder_selected_chan_reserve_msat = Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis);
+			let holder_selected_chan_reserve_msat = Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis) * 1000;
 			let htlc_candidate = HTLCCandidate::new(amount_msat, HTLCInitiator::LocalOffered);
 			let counterparty_commit_tx_fee_msat = self.next_remote_commit_tx_fee_msat(htlc_candidate, None);
 			if counterparty_balance_msat < holder_selected_chan_reserve_msat + counterparty_commit_tx_fee_msat {
@@ -4453,7 +4513,7 @@ impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
 );
 
 impl Writeable for ChannelUpdateStatus {
-	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
 		// We only care about writing out the current state as it was announced, ie only either
 		// Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
 		// channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
@@ -4468,7 +4528,7 @@ impl Writeable for ChannelUpdateStatus {
 }
 
 impl Readable for ChannelUpdateStatus {
-	fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+	fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
 		Ok(match <u8 as Readable>::read(reader)? {
 			0 => ChannelUpdateStatus::Enabled,
 			1 => ChannelUpdateStatus::Disabled,
@@ -4478,7 +4538,7 @@ impl Readable for ChannelUpdateStatus {
 }
 
 impl<Signer: Sign> Writeable for Channel<Signer> {
-	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
 		// Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
 		// called.
 
@@ -4684,6 +4744,13 @@ impl<Signer: Sign> Writeable for Channel<Signer> {
 
 		self.channel_update_status.write(writer)?;
 
+		#[cfg(any(test, feature = "fuzztarget"))]
+		(self.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
+		#[cfg(any(test, feature = "fuzztarget"))]
+		for htlc in self.historical_inbound_htlc_fulfills.iter() {
+			htlc.write(writer)?;
+		}
+
 		write_tlv_fields!(writer, {
 			(0, self.announcement_sigs, option),
 			// minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
@@ -4704,7 +4771,7 @@ impl<Signer: Sign> Writeable for Channel<Signer> {
 const MAX_ALLOC_SIZE: usize = 64*1024;
 impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel<Signer>
 		where K::Target: KeysInterface<Signer = Signer> {
-	fn read<R: ::std::io::Read>(reader: &mut R, keys_source: &'a K) -> Result<Self, DecodeError> {
+	fn read<R: io::Read>(reader: &mut R, keys_source: &'a K) -> Result<Self, DecodeError> {
 		let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
 
 		let user_id = Readable::read(reader)?;
@@ -4893,6 +4960,16 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel<Signer>
 
 		let channel_update_status = Readable::read(reader)?;
 
+		#[cfg(any(test, feature = "fuzztarget"))]
+		let mut historical_inbound_htlc_fulfills = HashSet::new();
+		#[cfg(any(test, feature = "fuzztarget"))]
+		{
+			let htlc_fulfills_len: u64 = Readable::read(reader)?;
+			for _ in 0..htlc_fulfills_len {
+				assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
+			}
+		}
+
 		let mut announcement_sigs = None;
 		read_tlv_fields!(reader, {
 			(0, announcement_sigs, option),
@@ -4985,6 +5062,9 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel<Signer>
 			next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
 
 			workaround_lnd_bug_4006: None,
+
+			#[cfg(any(test, feature = "fuzztarget"))]
+			historical_inbound_htlc_fulfills,
 		})
 	}
}
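
// Illustrative sketch only -- not part of the diff above. It shows how a caller inside the `ln`
// module (e.g. the channel manager's claim path) might consume the new UpdateFulfillCommitFetch
// return type of get_update_fulfill_htlc_and_commit. The bindings `chan`, `htlc_id`,
// `payment_preimage` and `logger` are assumed to be in scope, and `persist_monitor_update` /
// `send_to_peer` are hypothetical placeholders for the real monitor-update and message-sending
// plumbing.
match chan.get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage, &logger) {
	Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, msgs: Some((update_fulfill, commitment_signed)) }) => {
		// A fresh claim with messages ready: persist the preimage-bearing monitor update first,
		// then relay the update_fulfill_htlc + commitment_signed pair to the counterparty.
		persist_monitor_update(monitor_update);
		send_to_peer(update_fulfill, commitment_signed);
	},
	Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, msgs: None }) => {
		// A fresh claim that went into the holding cell: persist the monitor update now; the
		// update_fulfill_htlc will be generated when the holding cell is freed.
		persist_monitor_update(monitor_update);
	},
	Ok(UpdateFulfillCommitFetch::DuplicateClaim {}) => {
		// Already fulfilled (e.g. the peer re-sent update_fulfill_htlc after a reconnect):
		// nothing new to persist or send, and in particular nothing to fail backwards.
	},
	Err((_err, monitor_update)) => {
		// Building the commitment update failed, but the monitor update still carries the
		// payment preimage and must be applied so the claim is not lost.
		persist_monitor_update(monitor_update);
	},
}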
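
// Illustrative arithmetic only -- not part of the diff above. It motivates the `* 1000` added to
// the reserve check: get_holder_selected_channel_reserve_satoshis returns satoshis, while the
// surrounding balance and fee values are in millisatoshis. The numbers below are made up.
fn main() {
	let holder_selected_chan_reserve_sat: u64 = 10_000;                              // reserve, in sats
	let holder_selected_chan_reserve_msat = holder_selected_chan_reserve_sat * 1000; // 10_000_000 msat
	let counterparty_balance_msat: u64 = 25_000_000;                                 // 25_000 sats, in msat
	let counterparty_commit_tx_fee_msat: u64 = 20_000_000;                           // 20_000 sats, in msat

	// With the conversion the reserve violation is caught: 25_000_000 < 10_000_000 + 20_000_000.
	assert!(counterparty_balance_msat < holder_selected_chan_reserve_msat + counterparty_commit_tx_fee_msat);
	// Without `* 1000` the reserve would be compared as if it were 10_000 msat, and the same
	// check (25_000_000 < 10_000 + 20_000_000) would not trigger, understating the reserve 1000x.
	assert!(!(counterparty_balance_msat < holder_selected_chan_reserve_sat + counterparty_commit_tx_fee_msat));
}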