X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannel.rs;fp=lightning%2Fsrc%2Fln%2Fchannel.rs;h=6d95525773445363361f5c6b5ba0d9f5da499d9f;hb=7e78fa660cec8a73286c94c1073ee588140e7a01;hp=d8a284fa8184cd9e1d030043c351c09c144d4039;hpb=fecac81874c41724a855c9e696c312353609df23;p=rust-lightning

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index d8a284fa..6d955257 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -444,6 +444,15 @@ pub(super) struct Channel<Signer: Sign> {
 	///
 	/// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
 	pub workaround_lnd_bug_4006: Option<msgs::FundingLocked>,
+
+	#[cfg(any(test, feature = "fuzztarget"))]
+	// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
+	// corresponding HTLC on the inbound path. If, then, the outbound path channel is
+	// disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
+	// messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
+	// is fine, but as a sanity check when we fail to generate the second claim, we check here
+	// that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
+	historical_inbound_htlc_fulfills: HashSet<u64>,
 }
 
 #[cfg(any(test, feature = "fuzztarget"))]
@@ -645,6 +654,9 @@ impl<Signer: Sign> Channel<Signer> {
 			next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
 
 			workaround_lnd_bug_4006: None,
+
+			#[cfg(any(test, feature = "fuzztarget"))]
+			historical_inbound_htlc_fulfills: HashSet::new(),
 		})
 	}
 
@@ -890,6 +902,9 @@ impl<Signer: Sign> Channel<Signer> {
 			next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
 
 			workaround_lnd_bug_4006: None,
+
+			#[cfg(any(test, feature = "fuzztarget"))]
+			historical_inbound_htlc_fulfills: HashSet::new(),
 		};
 
 		Ok(chan)
@@ -1249,8 +1264,8 @@ impl<Signer: Sign> Channel<Signer> {
 						if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
 						} else {
 							log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id()));
+							debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
 						}
-						debug_assert!(false, "Tried to fulfill an HTLC that was already fail/fulfilled");
 						return Ok((None, None));
 					},
 					_ => {
@@ -1263,7 +1278,11 @@ impl<Signer: Sign> Channel<Signer> {
 			}
 		}
 		if pending_idx == core::usize::MAX {
-			return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
+			#[cfg(any(test, feature = "fuzztarget"))]
+			// If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
+			// this is simply a duplicate claim, not previously failed and we lost funds.
+			debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+			return Ok((None, None));
 		}
 
 		// Now update local state:
@@ -1285,7 +1304,8 @@ impl<Signer: Sign> Channel<Signer> {
 					if htlc_id_arg == htlc_id {
 						// Make sure we don't leave latest_monitor_update_id incremented here:
 						self.latest_monitor_update_id -= 1;
-						debug_assert!(false, "Tried to fulfill an HTLC that was already fulfilled");
+						#[cfg(any(test, feature = "fuzztarget"))]
+						debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
 						return Ok((None, None));
 					}
 				},
@@ -1305,8 +1325,12 @@ impl<Signer: Sign> Channel<Signer> {
 			self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
 				payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
 			});
+			#[cfg(any(test, feature = "fuzztarget"))]
+			self.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
 			return Ok((None, Some(monitor_update)));
 		}
+		#[cfg(any(test, feature = "fuzztarget"))]
+		self.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
 
 		{
 			let htlc = &mut self.pending_inbound_htlcs[pending_idx];
@@ -1366,8 +1390,11 @@ impl<Signer: Sign> Channel<Signer> {
 			if htlc.htlc_id == htlc_id_arg {
 				match htlc.state {
 					InboundHTLCState::Committed => {},
-					InboundHTLCState::LocalRemoved(_) => {
-						debug_assert!(false, "Tried to fail an HTLC that was already fail/fulfilled");
+					InboundHTLCState::LocalRemoved(ref reason) => {
+						if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
+						} else {
+							debug_assert!(false, "Tried to fail an HTLC that was already failed");
+						}
 						return Ok(None);
 					},
 					_ => {
@@ -1379,7 +1406,11 @@ impl<Signer: Sign> Channel<Signer> {
 			}
 		}
 		if pending_idx == core::usize::MAX {
-			return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
+			#[cfg(any(test, feature = "fuzztarget"))]
+			// If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
+			// is simply a duplicate fail, not previously failed and we failed-back too early.
+			debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+			return Ok(None);
 		}
 
 		// Now update local state:
@@ -1388,8 +1419,9 @@ impl<Signer: Sign> Channel<Signer> {
 			match pending_update {
 				&HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
 					if htlc_id_arg == htlc_id {
-						debug_assert!(false, "Tried to fail an HTLC that was already fulfilled");
-						return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
+						#[cfg(any(test, feature = "fuzztarget"))]
+						debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+						return Ok(None);
 					}
 				},
 				&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
@@ -2453,7 +2485,14 @@ impl<Signer: Sign> Channel<Signer> {
 					},
 					&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
 						match self.get_update_fail_htlc(htlc_id, err_packet.clone(), logger) {
-							Ok(update_fail_msg_option) => update_fail_htlcs.push(update_fail_msg_option.unwrap()),
+							Ok(update_fail_msg_option) => {
+								// If an HTLC failure was previously added to the holding cell (via
+								// `get_update_fail_htlc`) then generating the fail message itself
+								// must not fail - we should never end up in a state where we
+								// double-fail an HTLC or fail-then-claim an HTLC as it indicates
+								// we didn't wait for a full revocation before failing.
+								update_fail_htlcs.push(update_fail_msg_option.unwrap())
+							},
 							Err(e) => {
 								if let ChannelError::Ignore(_) = e {}
 								else {
@@ -4690,6 +4729,13 @@ impl<Signer: Sign> Writeable for Channel<Signer> {
 
 		self.channel_update_status.write(writer)?;
 
+		#[cfg(any(test, feature = "fuzztarget"))]
+		(self.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
+		#[cfg(any(test, feature = "fuzztarget"))]
+		for htlc in self.historical_inbound_htlc_fulfills.iter() {
+			htlc.write(writer)?;
+		}
+
 		write_tlv_fields!(writer, {
 			(0, self.announcement_sigs, option),
 			// minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
@@ -4882,6 +4928,16 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel<Signer>
 
 		let channel_update_status = Readable::read(reader)?;
 
+		#[cfg(any(test, feature = "fuzztarget"))]
+		let mut historical_inbound_htlc_fulfills = HashSet::new();
+		#[cfg(any(test, feature = "fuzztarget"))]
+		{
+			let htlc_fulfills_len: u64 = Readable::read(reader)?;
+			for _ in 0..htlc_fulfills_len {
+				assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
+			}
+		}
+
 		let mut announcement_sigs = None;
 		read_tlv_fields!(reader, {
 			(0, announcement_sigs, option),
@@ -4973,6 +5029,9 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel<Signer>
 			next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
 
 			workaround_lnd_bug_4006: None,
+
+			#[cfg(any(test, feature = "fuzztarget"))]
+			historical_inbound_htlc_fulfills,
 		})
 	}
 }
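
For reference, below is a minimal, self-contained sketch of the invariant the new test/fuzztarget-only historical_inbound_htlc_fulfills set is meant to enforce: a fulfill or fail that can no longer find its HTLC is only acceptable when it is a duplicate of a claim we already made. The ChannelSketch type and its fulfill_htlc/fail_htlc methods are hypothetical simplifications for illustration, not the rust-lightning Channel API.

// Hypothetical, simplified stand-in for the Channel state touched by this diff.
use std::collections::HashSet;

struct ChannelSketch {
	pending_inbound_htlcs: HashSet<u64>,
	// Test-only in the real code (cfg(test)/cfg(fuzztarget)); always present in this sketch.
	historical_inbound_htlc_fulfills: HashSet<u64>,
}

impl ChannelSketch {
	fn fulfill_htlc(&mut self, htlc_id: u64) {
		if !self.pending_inbound_htlcs.remove(&htlc_id) {
			// The HTLC is gone: only acceptable if this is a replayed duplicate of a
			// claim we already made (e.g. after a disconnect/reconnect).
			debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id),
				"Tried to fulfill an HTLC we never claimed");
			return;
		}
		self.historical_inbound_htlc_fulfills.insert(htlc_id);
		// ...build update_fulfill_htlc / monitor update here...
	}

	fn fail_htlc(&mut self, htlc_id: u64) {
		if !self.pending_inbound_htlcs.remove(&htlc_id) {
			// Same check on the fail path: a missing HTLC must have been claimed
			// earlier, otherwise we failed back before a full revocation.
			debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id),
				"Tried to fail an HTLC we never claimed");
			return;
		}
		// ...build update_fail_htlc here...
	}
}

fn main() {
	let mut chan = ChannelSketch {
		pending_inbound_htlcs: [1, 2].into_iter().collect(),
		historical_inbound_htlc_fulfills: HashSet::new(),
	};
	chan.fulfill_htlc(1);
	chan.fulfill_htlc(1); // duplicate claim after a replay: tolerated
	chan.fail_htlc(2);    // normal fail of an HTLC we never claimed: fine
}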