X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannel.rs;fp=lightning%2Fsrc%2Fln%2Fchannel.rs;h=e08b8af49a3a40f5992f8e09b77f99f8279135fd;hb=1bb9e64ebc6afaa21afcef4f71bb83054d9b2023;hp=dc2dd758a9ea97bae465a69836f79200190c97d7;hpb=0b4079df9a2d4fd7b178f7441c5fb89d79e40a4d;p=rust-lightning

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index dc2dd758..e08b8af4 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -301,6 +301,33 @@ pub struct CounterpartyForwardingInfo {
 	pub cltv_expiry_delta: u16,
 }
 
+/// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for a
+/// description.
+enum UpdateFulfillFetch {
+	NewClaim {
+		monitor_update: ChannelMonitorUpdate,
+		msg: Option<msgs::UpdateFulfillHTLC>,
+	},
+	DuplicateClaim {},
+}
+
+/// The return type of get_update_fulfill_htlc_and_commit.
+pub enum UpdateFulfillCommitFetch {
+	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
+	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
+	/// previously placed in the holding cell (and has since been removed).
+	NewClaim {
+		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
+		monitor_update: ChannelMonitorUpdate,
+		/// The update_fulfill message and commitment_signed message (if the claim was not placed
+		/// in the holding cell).
+		msgs: Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)>,
+	},
+	/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
+	/// or has been forgotten (presumably previously claimed).
+	DuplicateClaim {},
+}
+
 // TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
 // has been completed, and then turn into a Channel to get compiler-time enforcement of things like
 // calling channel_id() before we're set up or things like get_outbound_funding_signed on an
@@ -444,6 +471,15 @@ pub(super) struct Channel<Signer: Sign> {
 	///
 	/// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
 	pub workaround_lnd_bug_4006: Option<msgs::FundingLocked>,
+
+	#[cfg(any(test, feature = "fuzztarget"))]
+	// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
+	// corresponding HTLC on the inbound path. If, then, the outbound path channel is
+	// disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
+	// messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
+	// is fine, but as a sanity check when we fail to generate the second claim, we check here
+	// that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
+	historical_inbound_htlc_fulfills: HashSet<u64>,
 }
 
 #[cfg(any(test, feature = "fuzztarget"))]
@@ -644,6 +680,9 @@ impl<Signer: Sign> Channel<Signer> {
 			next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
 
 			workaround_lnd_bug_4006: None,
+
+			#[cfg(any(test, feature = "fuzztarget"))]
+			historical_inbound_htlc_fulfills: HashSet::new(),
 		})
 	}
 
@@ -889,6 +928,9 @@ impl<Signer: Sign> Channel<Signer> {
 			next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
 
 			workaround_lnd_bug_4006: None,
+
+			#[cfg(any(test, feature = "fuzztarget"))]
+			historical_inbound_htlc_fulfills: HashSet::new(),
 		};
 
 		Ok(chan)
@@ -1216,13 +1258,7 @@ impl<Signer: Sign> Channel<Signer> {
 		make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
 	}
 
-	/// Per HTLC, only one get_update_fail_htlc or get_update_fulfill_htlc call may be made.
-	/// In such cases we debug_assert!(false) and return a ChannelError::Ignore. Thus, will always
-	/// return Ok(_) if debug assertions are turned on or preconditions are met.
-	///
-	/// Note that it is still possible to hit these assertions in case we find a preimage on-chain
-	/// but then have a reorg which settles on an HTLC-failure on chain.
-	fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> Result<(Option<msgs::UpdateFulfillHTLC>, Option<ChannelMonitorUpdate>), ChannelError> where L::Target: Logger {
+	fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
 		// Either ChannelFunded got set (which means it won't be unset) or there is no way any
 		// caller thought we could have something claimed (cause we wouldn't have accepted an
 		// incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
@@ -1248,9 +1284,9 @@ impl<Signer: Sign> Channel<Signer> {
 						if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
 						} else {
 							log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id()));
+							debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
 						}
-						debug_assert!(false, "Tried to fulfill an HTLC that was already fail/fulfilled");
-						return Ok((None, None));
+						return UpdateFulfillFetch::DuplicateClaim {};
 					},
 					_ => {
 						debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
@@ -1262,7 +1298,11 @@ impl<Signer: Sign> Channel<Signer> {
 			}
 		}
 		if pending_idx == core::usize::MAX {
-			return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
+			#[cfg(any(test, feature = "fuzztarget"))]
+			// If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
+			// this is simply a duplicate claim, not previously failed and we lost funds.
+			debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+			return UpdateFulfillFetch::DuplicateClaim {};
 		}
 
 		// Now update local state:
@@ -1284,8 +1324,9 @@ impl<Signer: Sign> Channel<Signer> {
 					if htlc_id_arg == htlc_id {
 						// Make sure we don't leave latest_monitor_update_id incremented here:
 						self.latest_monitor_update_id -= 1;
-						debug_assert!(false, "Tried to fulfill an HTLC that was already fulfilled");
-						return Ok((None, None));
+						#[cfg(any(test, feature = "fuzztarget"))]
+						debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+						return UpdateFulfillFetch::DuplicateClaim {};
 					}
 				},
 				&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
@@ -1294,7 +1335,7 @@ impl<Signer: Sign> Channel<Signer> {
 						// TODO: We may actually be able to switch to a fulfill here, though it's
 						// rare enough it may not be worth the complexity burden.
 						debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
-						return Ok((None, Some(monitor_update)));
+						return UpdateFulfillFetch::NewClaim { monitor_update, msg: None };
 					}
 				},
 				_ => {}
@@ -1304,52 +1345,58 @@ impl<Signer: Sign> Channel<Signer> {
 			self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
 				payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
 			});
-			return Ok((None, Some(monitor_update)));
+			#[cfg(any(test, feature = "fuzztarget"))]
+			self.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
+			return UpdateFulfillFetch::NewClaim { monitor_update, msg: None };
 		}
+		#[cfg(any(test, feature = "fuzztarget"))]
+		self.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
 
 		{
 			let htlc = &mut self.pending_inbound_htlcs[pending_idx];
 			if let InboundHTLCState::Committed = htlc.state {
 			} else {
 				debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
-				return Ok((None, Some(monitor_update)));
+				return UpdateFulfillFetch::NewClaim { monitor_update, msg: None };
 			}
 			log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id));
 			htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
 		}
 
-		Ok((Some(msgs::UpdateFulfillHTLC {
-			channel_id: self.channel_id(),
-			htlc_id: htlc_id_arg,
-			payment_preimage: payment_preimage_arg,
-		}), Some(monitor_update)))
+		UpdateFulfillFetch::NewClaim {
+			monitor_update,
+			msg: Some(msgs::UpdateFulfillHTLC {
+				channel_id: self.channel_id(),
+				htlc_id: htlc_id_arg,
+				payment_preimage: payment_preimage_arg,
+			}),
+		}
 	}
 
-	pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> Result<(Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)>, Option<ChannelMonitorUpdate>), ChannelError> where L::Target: Logger {
-		match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger)? {
-			(Some(update_fulfill_htlc), Some(mut monitor_update)) => {
-				let (commitment, mut additional_update) = self.send_commitment_no_status_check(logger)?;
+	pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> Result<UpdateFulfillCommitFetch, (ChannelError, ChannelMonitorUpdate)> where L::Target: Logger {
+		match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
+			UpdateFulfillFetch::NewClaim { mut monitor_update, msg: Some(update_fulfill_htlc) } => {
+				let (commitment, mut additional_update) = match self.send_commitment_no_status_check(logger) {
+					Err(e) => return Err((e, monitor_update)),
+					Ok(res) => res
+				};
 				// send_commitment_no_status_check may bump latest_monitor_id but we want them to be
 				// strictly increasing by one, so decrement it here.
 				self.latest_monitor_update_id = monitor_update.update_id;
 				monitor_update.updates.append(&mut additional_update.updates);
-				Ok((Some((update_fulfill_htlc, commitment)), Some(monitor_update)))
-			},
-			(Some(update_fulfill_htlc), None) => {
-				let (commitment, monitor_update) = self.send_commitment_no_status_check(logger)?;
-				Ok((Some((update_fulfill_htlc, commitment)), Some(monitor_update)))
+				Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, msgs: Some((update_fulfill_htlc, commitment)) })
 			},
-			(None, Some(monitor_update)) => Ok((None, Some(monitor_update))),
-			(None, None) => Ok((None, None))
+			UpdateFulfillFetch::NewClaim { monitor_update, msg: None } => Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, msgs: None }),
+			UpdateFulfillFetch::DuplicateClaim {} => Ok(UpdateFulfillCommitFetch::DuplicateClaim {}),
 		}
 	}
 
-	/// Per HTLC, only one get_update_fail_htlc or get_update_fulfill_htlc call may be made.
-	/// In such cases we debug_assert!(false) and return a ChannelError::Ignore. Thus, will always
-	/// return Ok(_) if debug assertions are turned on or preconditions are met.
-	///
-	/// Note that it is still possible to hit these assertions in case we find a preimage on-chain
-	/// but then have a reorg which settles on an HTLC-failure on chain.
+	/// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
+	/// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
+	/// however, fail more than once as we wait for an upstream failure to be irrevocably committed
+	/// before we fail backwards.
+	/// If we do fail twice, we debug_assert!(false) and return Ok(None). Thus, this will always
+	/// return Ok(_) if debug assertions are turned on or preconditions are met.
 	pub fn get_update_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L) -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
 		if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
 			panic!("Was asked to fail an HTLC when channel was not in an operational state");
@@ -1365,8 +1412,11 @@ impl<Signer: Sign> Channel<Signer> {
 			if htlc.htlc_id == htlc_id_arg {
 				match htlc.state {
 					InboundHTLCState::Committed => {},
-					InboundHTLCState::LocalRemoved(_) => {
-						debug_assert!(false, "Tried to fail an HTLC that was already fail/fulfilled");
+					InboundHTLCState::LocalRemoved(ref reason) => {
+						if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
+						} else {
+							debug_assert!(false, "Tried to fail an HTLC that was already failed");
+						}
 						return Ok(None);
 					},
 					_ => {
@@ -1378,7 +1428,11 @@ impl<Signer: Sign> Channel<Signer> {
 			}
 		}
 		if pending_idx == core::usize::MAX {
-			return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
+			#[cfg(any(test, feature = "fuzztarget"))]
+			// If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
+			// is simply a duplicate fail, not previously failed and we failed-back too early.
+			debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+			return Ok(None);
 		}
 
 		// Now update local state:
@@ -1387,8 +1441,9 @@ impl<Signer: Sign> Channel<Signer> {
 			match pending_update {
 				&HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
 					if htlc_id_arg == htlc_id {
-						debug_assert!(false, "Tried to fail an HTLC that was already fulfilled");
-						return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
+						#[cfg(any(test, feature = "fuzztarget"))]
+						debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+						return Ok(None);
 					}
 				},
 				&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
@@ -2435,24 +2490,28 @@ impl<Signer: Sign> Channel<Signer> {
 					}
 				},
 				&HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
-					match self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) {
-						Ok((update_fulfill_msg_option, additional_monitor_update_opt)) => {
-							update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap());
-							if let Some(mut additional_monitor_update) = additional_monitor_update_opt {
-								monitor_update.updates.append(&mut additional_monitor_update.updates);
-							}
-						},
-						Err(e) => {
-							if let ChannelError::Ignore(_) = e {}
-							else {
-								panic!("Got a non-IgnoreError action trying to fulfill holding cell HTLC");
-							}
-						}
-					}
+					// If an HTLC claim was previously added to the holding cell (via
+					// `get_update_fulfill_htlc`), then generating the claim message itself must
+					// not fail - any in-between attempts to claim the HTLC will have resulted
+					// in it hitting the holding cell again and we cannot change the state of a
+					// holding cell HTLC from fulfill to anything else.
+					let (update_fulfill_msg_option, mut additional_monitor_update) =
+						if let UpdateFulfillFetch::NewClaim { msg, monitor_update } = self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) {
+							(msg, monitor_update)
+						} else { unreachable!() };
+					update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap());
+					monitor_update.updates.append(&mut additional_monitor_update.updates);
 				},
 				&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
 					match self.get_update_fail_htlc(htlc_id, err_packet.clone(), logger) {
-						Ok(update_fail_msg_option) => update_fail_htlcs.push(update_fail_msg_option.unwrap()),
+						Ok(update_fail_msg_option) => {
+							// If an HTLC failure was previously added to the holding cell (via
+							// `get_update_fail_htlc`) then generating the fail message itself
+							// must not fail - we should never end up in a state where we
+							// double-fail an HTLC or fail-then-claim an HTLC as it indicates
+							// we didn't wait for a full revocation before failing.
+							update_fail_htlcs.push(update_fail_msg_option.unwrap())
+						},
 						Err(e) => {
 							if let ChannelError::Ignore(_) = e {}
 							else {
@@ -4684,6 +4743,13 @@ impl<Signer: Sign> Writeable for Channel<Signer> {
 
 		self.channel_update_status.write(writer)?;
 
+		#[cfg(any(test, feature = "fuzztarget"))]
+		(self.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
+		#[cfg(any(test, feature = "fuzztarget"))]
+		for htlc in self.historical_inbound_htlc_fulfills.iter() {
+			htlc.write(writer)?;
+		}
+
 		write_tlv_fields!(writer, {
 			(0, self.announcement_sigs, option),
 			// minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
@@ -4893,6 +4959,16 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel<Signer>
 
 		let channel_update_status = Readable::read(reader)?;
 
+		#[cfg(any(test, feature = "fuzztarget"))]
+		let mut historical_inbound_htlc_fulfills = HashSet::new();
+		#[cfg(any(test, feature = "fuzztarget"))]
+		{
+			let htlc_fulfills_len: u64 = Readable::read(reader)?;
+			for _ in 0..htlc_fulfills_len {
+				assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
+			}
+		}
+
 		let mut announcement_sigs = None;
 		read_tlv_fields!(reader, {
 			(0, announcement_sigs, option),
@@ -4985,6 +5061,9 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel<Signer>
 			next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
 
 			workaround_lnd_bug_4006: None,
+
+			#[cfg(any(test, feature = "fuzztarget"))]
+			historical_inbound_htlc_fulfills,
 		})
 	}
 }
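
Taken together, the new `UpdateFulfillFetch` return type and the debug-only `historical_inbound_htlc_fulfills` set make HTLC claims idempotent: a replayed claim (e.g. a counterparty re-broadcasting update_fulfill_htlc after reconnect) becomes a `DuplicateClaim` no-op instead of an `Err`. The sketch below models just that state machine with hypothetical stand-in types (`FakeChannel`, `FulfillFetch`, a bare `u64` monitor-update id); it is not LDK's actual implementation.

```rust
use std::collections::HashSet;

// Hypothetical stand-ins; only the NewClaim/DuplicateClaim split and the
// debug-only history check mirror the patch above.
#[derive(Debug)]
enum FulfillFetch {
    NewClaim { monitor_update_id: u64 },
    DuplicateClaim,
}

struct FakeChannel {
    pending_inbound_htlcs: HashSet<u64>,
    latest_monitor_update_id: u64,
    // Plays the role of historical_inbound_htlc_fulfills.
    historical_fulfills: HashSet<u64>,
}

impl FakeChannel {
    fn get_update_fulfill_htlc(&mut self, htlc_id: u64) -> FulfillFetch {
        if !self.pending_inbound_htlcs.remove(&htlc_id) {
            // No such pending HTLC: this must be a duplicate of an earlier
            // claim, never an HTLC we previously failed.
            debug_assert!(self.historical_fulfills.contains(&htlc_id));
            return FulfillFetch::DuplicateClaim;
        }
        self.historical_fulfills.insert(htlc_id);
        self.latest_monitor_update_id += 1;
        FulfillFetch::NewClaim { monitor_update_id: self.latest_monitor_update_id }
    }
}

fn main() {
    let mut chan = FakeChannel {
        pending_inbound_htlcs: vec![42u64].into_iter().collect(),
        latest_monitor_update_id: 0,
        historical_fulfills: HashSet::new(),
    };
    // The first claim produces a monitor update carrying the preimage...
    println!("{:?}", chan.get_update_fulfill_htlc(42));
    // ...and a replayed claim is a harmless no-op instead of an error.
    println!("{:?}", chan.get_update_fulfill_htlc(42));
}
```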
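`get_update_fulfill_htlc_and_commit` also changes its error type to `Err((ChannelError, ChannelMonitorUpdate))`: even when `send_commitment_no_status_check` fails, the caller still receives the monitor update that carries the payment preimage. A minimal caller-side sketch, again with invented stand-ins (`MonitorUpdate`, `CommitFetch`, `apply_monitor_update`) rather than the real `ChannelManager` plumbing:

```rust
// Hypothetical stand-ins for the caller side of the new API.
struct MonitorUpdate { update_id: u64 }
struct ChannelErr;
enum CommitFetch {
    NewClaim { monitor_update: MonitorUpdate, msgs: Option<(String, String)> },
    DuplicateClaim,
}

fn apply_monitor_update(update: MonitorUpdate) {
    // Stand-in for persisting the update (and thus the preimage).
    let _ = update.update_id;
}

fn handle_claim(res: Result<CommitFetch, (ChannelErr, MonitorUpdate)>) {
    match res {
        Ok(CommitFetch::NewClaim { monitor_update, msgs }) => {
            // Persist the preimage first, then relay any messages.
            apply_monitor_update(monitor_update);
            if let Some((fulfill, commitment)) = msgs {
                println!("send {} then {}", fulfill, commitment);
            }
        }
        // A duplicate claim needs no action at all; before this patch it
        // surfaced as an Err(ChannelError::Ignore(..)).
        Ok(CommitFetch::DuplicateClaim) => {}
        Err((_err, monitor_update)) => {
            // Even when building commitment_signed fails, the preimage must
            // still reach the monitor, so the update rides along in the Err.
            apply_monitor_update(monitor_update);
        }
    }
}

fn main() {
    handle_claim(Ok(CommitFetch::NewClaim {
        monitor_update: MonitorUpdate { update_id: 1 },
        msgs: Some(("update_fulfill_htlc".into(), "commitment_signed".into())),
    }));
    handle_claim(Ok(CommitFetch::DuplicateClaim));
}
```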
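The `Writeable`/`ReadableArgs` hunks persist the debug-only set as a `u64` count followed by each HTLC ID via LDK's integer serialization (big-endian; that detail is assumed here). A standalone round-trip of that layout:

```rust
use std::collections::HashSet;
use std::convert::TryInto;

// Assumed layout: a u64 count, then each u64 HTLC ID, all big-endian,
// mirroring the length-prefixed writes in the Writeable hunk above.
fn write_set(set: &HashSet<u64>, out: &mut Vec<u8>) {
    out.extend_from_slice(&(set.len() as u64).to_be_bytes());
    for id in set {
        out.extend_from_slice(&id.to_be_bytes());
    }
}

fn read_set(buf: &[u8]) -> HashSet<u64> {
    let len = u64::from_be_bytes(buf[0..8].try_into().unwrap()) as usize;
    let mut set = HashSet::new();
    for i in 0..len {
        let start = 8 + i * 8;
        let id = u64::from_be_bytes(buf[start..start + 8].try_into().unwrap());
        // Mirrors the reader's assert!: a duplicate entry means corruption.
        assert!(set.insert(id));
    }
    set
}

fn main() {
    let set: HashSet<u64> = vec![1, 5, 9].into_iter().collect();
    let mut buf = Vec::new();
    write_set(&set, &mut buf);
    assert_eq!(read_set(&buf), set);
}
```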