+# 0.0.100 - WIP
+
+## Serialization Compatibility
+ * HTLCs which were in the process of being claimed on-chain when a pre-0.0.100
+ `ChannelMonitor` was serialized may generate `PaymentForwarded` events with
+ spurious `fee_earned_msat` values. This only applies to payments which were
+ unresolved at the time of the upgrade.
+ * 0.0.100 clients with pending `PaymentForwarded` events at serialization time
+   will generate serialized `ChannelManager` objects which 0.0.99 and earlier
+   clients cannot read. The likelihood of this can be reduced by ensuring you
+   process all pending events immediately before serialization (as is done by
+   the `lightning-background-processor` crate), as sketched below.
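
A minimal sketch of that ordering, assuming the 0.0.100-era
`EventsProvider::process_pending_events`/`EventHandler` API (handlers take
`&Event` in this era) and a hypothetical application-provided `persist`
helper; the `lightning-background-processor` crate does this (and more) for
you:

```rust
use lightning::util::events::{Event, EventsProvider};
use lightning::util::ser::Writeable;

// Drain all pending events, then serialize, so that no `PaymentForwarded`
// event is left pending in the serialized state. `CM` is your concrete
// `ChannelManager` type; `persist` is a hypothetical durable-write helper.
fn process_events_then_persist<CM: EventsProvider + Writeable>(
	channel_manager: &CM, persist: impl Fn(Vec<u8>),
) {
	channel_manager.process_pending_events(&|event: &Event| {
		// ... your usual event handling, including `PaymentForwarded` ...
		let _ = event;
	});
	// With no events pending, the serialized form remains readable by
	// 0.0.99 and earlier clients.
	persist(channel_manager.encode());
}
```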
+
+
# 0.0.99 - 2021-07-09
## API Updates
},
events::Event::PaymentSent { .. } => {},
events::Event::PaymentFailed { .. } => {},
+ events::Event::PaymentForwarded { .. } if $node == 1 => {},
events::Event::PendingHTLCsForwardable { .. } => {
nodes[$node].process_pending_htlc_forwards();
},
//TODO: enhance by fetching random amounts from fuzz input?
payments_received.push(payment_hash);
},
- Event::PaymentSent {..} => {},
- Event::PaymentFailed {..} => {},
Event::PendingHTLCsForwardable {..} => {
should_forward = true;
},
- Event::SpendableOutputs {..} => {},
+ _ => {},
}
}
}
pub struct HTLCUpdate {
pub(crate) payment_hash: PaymentHash,
pub(crate) payment_preimage: Option<PaymentPreimage>,
- pub(crate) source: HTLCSource
+ pub(crate) source: HTLCSource,
+ pub(crate) onchain_value_satoshis: Option<u64>,
}
impl_writeable_tlv_based!(HTLCUpdate, {
(0, payment_hash, required),
+ (1, onchain_value_satoshis, option),
(2, source, required),
(4, payment_preimage, option),
});
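
The new `onchain_value_satoshis` field takes the odd TLV type 1, here and in
the `OnchainEvent` serialization just below. Under LDK's TLV convention
(mirroring BOLT 1's "it's OK to be odd" rule), a reader that hits an unknown
odd type may skip it, while an unknown even type must fail deserialization;
this is what lets a newer writer add optional fields without breaking readers
which understand the convention. A hypothetical skimmer sketching the rule
(LDK's `impl_writeable_tlv_based!` generates the real logic, and real TLV
streams use variable-length integers rather than the fixed-width ones here):

```rust
use std::io::{self, Read};

// Hypothetical TLV-stream skimmer illustrating the odd/even rule; names and
// encodings are illustrative, not LDK internals.
fn skim_tlv_stream<R: Read>(reader: &mut R, known_types: &[u64]) -> io::Result<()> {
	let mut buf = [0u8; 8];
	// Each record is (type, length, value).
	while reader.read_exact(&mut buf).is_ok() {
		let typ = u64::from_be_bytes(buf);
		reader.read_exact(&mut buf)?;
		let len = u64::from_be_bytes(buf);
		if known_types.contains(&typ) {
			// A real reader would parse the value here; we just skip it.
			io::copy(&mut reader.by_ref().take(len), &mut io::sink())?;
		} else if typ % 2 == 1 {
			// Unknown odd type: optional, silently skip the value.
			io::copy(&mut reader.by_ref().take(len), &mut io::sink())?;
		} else {
			// Unknown even type: required, fail deserialization.
			return Err(io::Error::new(io::ErrorKind::InvalidData, "unknown even TLV type"));
		}
	}
	Ok(())
}
```

As the final hunk in this diff notes, versions prior to 0.0.100 returned
`InvalidValue` on unknown odd types rather than ignoring them, which is why
the serialization-compatibility notes at the top of this changelog are
necessary.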
HTLCUpdate {
source: HTLCSource,
payment_hash: PaymentHash,
+ onchain_value_satoshis: Option<u64>,
},
MaturingOutput {
descriptor: SpendableOutputDescriptor,
impl_writeable_tlv_based_enum!(OnchainEvent,
(0, HTLCUpdate) => {
(0, source, required),
+ (1, onchain_value_satoshis, option),
(2, payment_hash, required),
},
(1, MaturingOutput) => {
event: OnchainEvent::HTLCUpdate {
source: (**source).clone(),
payment_hash: htlc.payment_hash.clone(),
+ onchain_value_satoshis: Some(htlc.amount_msat / 1000),
},
};
log_info!(logger, "Failing HTLC with payment_hash {} from {} counterparty commitment tx due to broadcast of revoked counterparty commitment transaction, waiting for confirmation (at height {})", log_bytes!(htlc.payment_hash.0), $commitment_tx, entry.confirmation_threshold());
event: OnchainEvent::HTLCUpdate {
source: (**source).clone(),
payment_hash: htlc.payment_hash.clone(),
+ onchain_value_satoshis: Some(htlc.amount_msat / 1000),
},
});
}
height,
event: OnchainEvent::HTLCUpdate {
source: source.clone(), payment_hash: htlc.payment_hash,
+ onchain_value_satoshis: Some(htlc.amount_msat / 1000)
},
};
log_trace!(logger, "Failing HTLC with payment_hash {} from {} holder commitment tx due to broadcast of transaction, waiting confirmation (at height{})",
// Produce actionable events from on-chain events having reached their threshold.
for entry in onchain_events_reaching_threshold_conf.drain(..) {
match entry.event {
- OnchainEvent::HTLCUpdate { ref source, payment_hash } => {
+ OnchainEvent::HTLCUpdate { ref source, payment_hash, onchain_value_satoshis } => {
// Check for duplicate HTLC resolutions.
#[cfg(debug_assertions)]
{
log_debug!(logger, "HTLC {} failure update has got enough confirmations to be passed upstream", log_bytes!(payment_hash.0));
self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate {
- payment_hash: payment_hash,
+ payment_hash,
payment_preimage: None,
source: source.clone(),
+ onchain_value_satoshis,
}));
},
OnchainEvent::MaturingOutput { descriptor } => {
if pending_htlc.payment_hash == $htlc_output.payment_hash && pending_htlc.amount_msat == $htlc_output.amount_msat {
if let &Some(ref source) = pending_source {
log_claim!("revoked counterparty commitment tx", false, pending_htlc, true);
- payment_data = Some(((**source).clone(), $htlc_output.payment_hash));
+ payment_data = Some(((**source).clone(), $htlc_output.payment_hash, $htlc_output.amount_msat));
break;
}
}
// transaction. This implies we either learned a preimage, the HTLC
// has timed out, or we screwed up. In any case, we should now
// resolve the source HTLC with the original sender.
- payment_data = Some(((*source).clone(), htlc_output.payment_hash));
+ payment_data = Some(((*source).clone(), htlc_output.payment_hash, htlc_output.amount_msat));
} else if !$holder_tx {
check_htlc_valid_counterparty!(self.current_counterparty_commitment_txid, htlc_output);
if payment_data.is_none() {
// Check that scan_commitment, above, decided there is some source worth relaying an
// HTLC resolution backwards to and figure out whether we learned a preimage from it.
- if let Some((source, payment_hash)) = payment_data {
+ if let Some((source, payment_hash, amount_msat)) = payment_data {
let mut payment_preimage = PaymentPreimage([0; 32]);
if accepted_preimage_claim {
if !self.pending_monitor_events.iter().any(
self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate {
source,
payment_preimage: Some(payment_preimage),
- payment_hash
+ payment_hash,
+ onchain_value_satoshis: Some(amount_msat / 1000),
}));
}
} else if offered_preimage_claim {
self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate {
source,
payment_preimage: Some(payment_preimage),
- payment_hash
+ payment_hash,
+ onchain_value_satoshis: Some(amount_msat / 1000),
}));
}
} else {
let entry = OnchainEventEntry {
txid: tx.txid(),
height,
- event: OnchainEvent::HTLCUpdate { source: source, payment_hash: payment_hash },
+ event: OnchainEvent::HTLCUpdate {
+ source, payment_hash,
+ onchain_value_satoshis: Some(amount_msat / 1000),
+ },
};
log_info!(logger, "Failing HTLC with payment_hash {} timeout by a spend tx, waiting for confirmation (at height {})", log_bytes!(payment_hash.0), entry.confirmation_threshold());
self.onchain_events_awaiting_threshold_conf.push(entry);
assert!(updates.update_fee.is_none());
assert_eq!(updates.update_fulfill_htlcs.len(), 1);
nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+ expect_payment_forwarded!(nodes[1], Some(1000), false);
check_added_monitors!(nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
assert_eq!(fulfill_msg, cs_updates.update_fulfill_htlcs[0]);
}
nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &fulfill_msg);
+ expect_payment_forwarded!(nodes[1], Some(1000), false);
check_added_monitors!(nodes[1], 1);
let mut bs_updates = None;
enum UpdateFulfillFetch {
NewClaim {
monitor_update: ChannelMonitorUpdate,
+ htlc_value_msat: u64,
msg: Option<msgs::UpdateFulfillHTLC>,
},
DuplicateClaim {},
NewClaim {
/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
monitor_update: ChannelMonitorUpdate,
+ /// The value of the HTLC which was claimed, in msat.
+ htlc_value_msat: u64,
/// The update_fulfill message and commitment_signed message (if the claim was not placed
/// in the holding cell).
msgs: Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)>,
// Holder designates channel data owned for the benefit of the user client.
// Counterparty designates channel data owned by another channel participant entity.
pub(super) struct Channel<Signer: Sign> {
+ #[cfg(any(test, feature = "_test_utils"))]
+ pub(crate) config: ChannelConfig,
+ #[cfg(not(any(test, feature = "_test_utils")))]
config: ChannelConfig,
user_id: u64,
// these, but for now we just have to treat them as normal.
let mut pending_idx = core::usize::MAX;
+ let mut htlc_value_msat = 0;
for (idx, htlc) in self.pending_inbound_htlcs.iter().enumerate() {
if htlc.htlc_id == htlc_id_arg {
assert_eq!(htlc.payment_hash, payment_hash_calc);
}
}
pending_idx = idx;
+ htlc_value_msat = htlc.amount_msat;
break;
}
}
// TODO: We may actually be able to switch to a fulfill here, though its
// rare enough it may not be worth the complexity burden.
debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
- return UpdateFulfillFetch::NewClaim { monitor_update, msg: None };
+ return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
}
},
_ => {}
});
#[cfg(any(test, feature = "fuzztarget"))]
self.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
- return UpdateFulfillFetch::NewClaim { monitor_update, msg: None };
+ return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
}
#[cfg(any(test, feature = "fuzztarget"))]
self.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
if let InboundHTLCState::Committed = htlc.state {
} else {
debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
- return UpdateFulfillFetch::NewClaim { monitor_update, msg: None };
+ return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
}
log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id));
htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
UpdateFulfillFetch::NewClaim {
monitor_update,
+ htlc_value_msat,
msg: Some(msgs::UpdateFulfillHTLC {
channel_id: self.channel_id(),
htlc_id: htlc_id_arg,
pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> Result<UpdateFulfillCommitFetch, (ChannelError, ChannelMonitorUpdate)> where L::Target: Logger {
match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
- UpdateFulfillFetch::NewClaim { mut monitor_update, msg: Some(update_fulfill_htlc) } => {
+ UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg: Some(update_fulfill_htlc) } => {
let (commitment, mut additional_update) = match self.send_commitment_no_status_check(logger) {
Err(e) => return Err((e, monitor_update)),
Ok(res) => res
// strictly increasing by one, so decrement it here.
self.latest_monitor_update_id = monitor_update.update_id;
monitor_update.updates.append(&mut additional_update.updates);
- Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, msgs: Some((update_fulfill_htlc, commitment)) })
+ Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, msgs: Some((update_fulfill_htlc, commitment)) })
},
- UpdateFulfillFetch::NewClaim { monitor_update, msg: None } => Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, msgs: None }),
+ UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None } =>
+ Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, msgs: None }),
UpdateFulfillFetch::DuplicateClaim {} => Ok(UpdateFulfillCommitFetch::DuplicateClaim {}),
}
}
/// Marks an outbound HTLC for which we have received an update_fail/fulfill/malformed
/// message as removed, returning a reference to the removed HTLC.
#[inline]
- fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentHash>, fail_reason: Option<HTLCFailReason>) -> Result<&HTLCSource, ChannelError> {
+ fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentHash>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
for htlc in self.pending_outbound_htlcs.iter_mut() {
if htlc.htlc_id == htlc_id {
match check_preimage {
OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
}
- return Ok(&htlc.source);
+ return Ok(htlc);
}
}
Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
}
- pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<HTLCSource, ChannelError> {
+ pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
}
}
let payment_hash = PaymentHash(Sha256::hash(&msg.payment_preimage.0[..]).into_inner());
- self.mark_outbound_htlc_removed(msg.htlc_id, Some(payment_hash), None).map(|source| source.clone())
+ self.mark_outbound_htlc_removed(msg.htlc_id, Some(payment_hash), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
}
pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
// in it hitting the holding cell again and we cannot change the state of a
// holding cell HTLC from fulfill to anything else.
let (update_fulfill_msg_option, mut additional_monitor_update) =
- if let UpdateFulfillFetch::NewClaim { msg, monitor_update } = self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) {
+ if let UpdateFulfillFetch::NewClaim { msg, monitor_update, .. } = self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) {
(msg, monitor_update)
} else { unreachable!() };
update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap());
}
}
+/// The return value of `claim_funds_from_hop`.
+enum ClaimFundsFromHop {
+ /// The previous hop's channel no longer exists, implying it has been force-closed; the
+ /// preimage should be handed to its `ChannelMonitor` instead.
+ PrevHopForceClosed,
+ /// Updating the channel monitor failed; carries the counterparty node id, the error to
+ /// surface to the caller, and the claimed HTLC value (in msat), if known.
+ MonitorUpdateFail(PublicKey, MsgHandleErrInternal, Option<u64>),
+ /// The HTLC was claimed; carries the claimed HTLC value, in msat.
+ Success(u64),
+ /// The HTLC had already been claimed; no further action is required.
+ DuplicateClaim,
+}
+
type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>);
/// Error type returned across the channel_state mutex boundary. When an Err is generated for a
HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data });
} else {
match self.claim_funds_from_hop(channel_state.as_mut().unwrap(), htlc.prev_hop, payment_preimage) {
- Err(Some(e)) => {
- if let msgs::ErrorAction::IgnoreError = e.1.err.action {
+ ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) => {
+ if let msgs::ErrorAction::IgnoreError = err.err.action {
// We got a temporary failure updating monitor, but will claim the
// HTLC when the monitor updating is restored (or on chain).
- log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", e.1.err.err);
+ log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
claimed_any_htlcs = true;
- } else { errs.push(e); }
+ } else { errs.push((pk, err)); }
},
- Err(None) => unreachable!("We already checked for channel existence, we can't fail here!"),
- Ok(()) => claimed_any_htlcs = true,
+ ClaimFundsFromHop::PrevHopForceClosed => unreachable!("We already checked for channel existence, we can't fail here!"),
+ _ => claimed_any_htlcs = true,
}
}
}
} else { false }
}
- fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> Result<(), Option<(PublicKey, MsgHandleErrInternal)>> {
+ fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop {
//TODO: Delay the claimed_funds relaying just like we do outbound relay!
let channel_state = &mut **channel_state_lock;
let chan_id = match channel_state.short_to_id.get(&prev_hop.short_channel_id) {
Some(chan_id) => chan_id.clone(),
None => {
- return Err(None)
+ return ClaimFundsFromHop::PrevHopForceClosed
}
};
if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) {
match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
Ok(msgs_monitor_option) => {
- if let UpdateFulfillCommitFetch::NewClaim { msgs, monitor_update } = msgs_monitor_option {
+ if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option {
if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Debug },
"Failed to update channel monitor with preimage {:?}: {:?}",
payment_preimage, e);
- return Err(Some((
+ return ClaimFundsFromHop::MonitorUpdateFail(
chan.get().get_counterparty_node_id(),
handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
- )));
+ Some(htlc_value_msat)
+ );
}
if let Some((msg, commitment_signed)) = msgs {
log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
}
});
}
+ return ClaimFundsFromHop::Success(htlc_value_msat);
+ } else {
+ return ClaimFundsFromHop::DuplicateClaim;
}
- return Ok(())
},
Err((e, monitor_update)) => {
if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
if drop {
chan.remove_entry();
}
- return Err(Some((counterparty_node_id, res)));
+ return ClaimFundsFromHop::MonitorUpdateFail(counterparty_node_id, res, None);
},
}
} else { unreachable!(); }
}
- fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage) {
+ fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool) {
match source {
HTLCSource::OutboundRoute { session_priv, .. } => {
mem::drop(channel_state_lock);
},
HTLCSource::PreviousHopData(hop_data) => {
let prev_outpoint = hop_data.outpoint;
- if let Err((counterparty_node_id, err)) = match self.claim_funds_from_hop(&mut channel_state_lock, hop_data, payment_preimage) {
- Ok(()) => Ok(()),
- Err(None) => {
- let preimage_update = ChannelMonitorUpdate {
- update_id: CLOSED_CHANNEL_UPDATE_ID,
- updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
- payment_preimage: payment_preimage.clone(),
- }],
- };
- // We update the ChannelMonitor on the backward link, after
- // receiving an offchain preimage event from the forward link (the
- // event being update_fulfill_htlc).
- if let Err(e) = self.chain_monitor.update_channel(prev_outpoint, preimage_update) {
- log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
- payment_preimage, e);
- }
- Ok(())
- },
- Err(Some(res)) => Err(res),
- } {
- mem::drop(channel_state_lock);
- let res: Result<(), _> = Err(err);
- let _ = handle_error!(self, res, counterparty_node_id);
+ let res = self.claim_funds_from_hop(&mut channel_state_lock, hop_data, payment_preimage);
+ let claimed_htlc = if let ClaimFundsFromHop::DuplicateClaim = res { false } else { true };
+ let htlc_claim_value_msat = match res {
+ ClaimFundsFromHop::MonitorUpdateFail(_, _, amt_opt) => amt_opt,
+ ClaimFundsFromHop::Success(amt) => Some(amt),
+ _ => None,
+ };
+ if let ClaimFundsFromHop::PrevHopForceClosed = res {
+ let preimage_update = ChannelMonitorUpdate {
+ update_id: CLOSED_CHANNEL_UPDATE_ID,
+ updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
+ payment_preimage: payment_preimage.clone(),
+ }],
+ };
+ // We update the ChannelMonitor on the backward link, after
+ // receiving an offchain preimage event from the forward link (the
+ // event being update_fulfill_htlc).
+ if let Err(e) = self.chain_monitor.update_channel(prev_outpoint, preimage_update) {
+ log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+ payment_preimage, e);
+ }
+ // Note that we do *not* set `claimed_htlc` to false here. In fact, this
+ // totally could be a duplicate claim, but we have no way of knowing
+ // without interrogating the `ChannelMonitor` we've provided the above
+ // update to. Instead, we simply document in `PaymentForwarded` that this
+ // can happen.
+ }
+ mem::drop(channel_state_lock);
+ if let ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) = res {
+ let result: Result<(), _> = Err(err);
+ let _ = handle_error!(self, result, pk);
+ }
+
+ if claimed_htlc {
+ if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
+ let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat {
+ Some(claimed_htlc_value - forwarded_htlc_value)
+ } else { None };
+
+ let mut pending_events = self.pending_events.lock().unwrap();
+ pending_events.push(events::Event::PaymentForwarded {
+ fee_earned_msat,
+ claim_from_onchain_tx: from_onchain,
+ });
+ }
}
},
}
fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
let mut channel_lock = self.channel_state.lock().unwrap();
- let htlc_source = {
+ let (htlc_source, forwarded_htlc_value) = {
let channel_state = &mut *channel_lock;
match channel_state.by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
};
- self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone());
+ self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false);
Ok(())
}
/// Process pending events from the `chain::Watch`, returning whether any events were processed.
fn process_pending_monitor_events(&self) -> bool {
let mut failed_channels = Vec::new();
- let pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
+ let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
let has_pending_monitor_events = !pending_monitor_events.is_empty();
- for monitor_event in pending_monitor_events {
+ for monitor_event in pending_monitor_events.drain(..) {
match monitor_event {
MonitorEvent::HTLCEvent(htlc_update) => {
if let Some(preimage) = htlc_update.payment_preimage {
log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
- self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
+ self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.onchain_value_satoshis.map(|v| v * 1000), true);
} else {
log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
}
}
+macro_rules! expect_payment_forwarded {
+ ($node: expr, $expected_fee: expr, $upstream_force_closed: expr) => {
+ let events = $node.node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentForwarded { fee_earned_msat, claim_from_onchain_tx } => {
+ assert_eq!(fee_earned_msat, $expected_fee);
+ assert_eq!(claim_from_onchain_tx, $upstream_force_closed);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ }
+}
+
#[cfg(test)]
macro_rules! expect_payment_failure_chan_update {
($node: expr, $scid: expr, $chan_closed: expr) => {
($node: expr, $prev_node: expr, $new_msgs: expr) => {
{
$node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0);
+ let fee = $node.node.channel_state.lock().unwrap().by_id.get(&next_msgs.as_ref().unwrap().0.channel_id).unwrap().config.forwarding_fee_base_msat;
+ expect_payment_forwarded!($node, Some(fee as u64), false);
check_added_monitors!($node, 1);
let new_next_msgs = if $new_msgs {
let events = $node.node.get_and_clear_pending_msg_events();
assert!(updates.update_fee.is_none());
assert_eq!(updates.update_fulfill_htlcs.len(), 1);
nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+ expect_payment_forwarded!(nodes[1], Some(1000), false);
check_added_monitors!(nodes[1], 1);
let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
assert!(updates.update_fee.is_none());
assert_eq!(updates.update_fulfill_htlcs.len(), 1);
nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+ expect_payment_forwarded!(nodes[1], Some(1000), false);
check_added_monitors!(nodes[1], 1);
let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
assert_eq!(added_monitors[0].0.txid, chan_2.3.txid());
added_monitors.clear();
}
+ let forwarded_events = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(forwarded_events.len(), 2);
+ if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[0] {
+ } else { panic!(); }
+ if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[1] {
+ } else { panic!(); }
let events = nodes[1].node.get_and_clear_pending_msg_events();
{
let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
// So we broadcast C's commitment tx and HTLC-Success on B's chain; we should successfully be
// able to extract the preimage and update the downstream monitor.
let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
connect_block(&nodes[1], &Block { header, txdata: vec![c_txn[1].clone(), c_txn[2].clone()]});
+ check_added_monitors!(nodes[1], 1);
+ expect_payment_forwarded!(nodes[1], Some(1000), true);
{
let mut b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
// ChannelMonitor: claim tx
check_spends!(b_txn[0], chan_2.3); // B local commitment tx, issued by ChannelManager
b_txn.clear();
}
- check_added_monitors!(nodes[1], 1);
let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(msg_events.len(), 3);
check_added_monitors!(nodes[1], 1);
expect_payment_failed!(nodes[0], duplicate_payment_hash, false);
// Solve 2nd HTLC by broadcasting on B's chain HTLC-Success Tx from C
+ // Note that the fee earned is effectively double, as the HTLC value (which includes the
+ // nodes[1] and nodes[2] fees) is rounded down and then claimed in full.
mine_transaction(&nodes[1], &htlc_success_txn[0]);
+ expect_payment_forwarded!(nodes[1], Some(196*2), true);
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
assert!(updates.update_fail_htlcs.is_empty());
assert_eq!(carol_updates.update_fulfill_htlcs.len(), 1);
nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &carol_updates.update_fulfill_htlcs[0]);
+ expect_payment_forwarded!(nodes[1], if go_onchain_before_fulfill || force_closing_node == 1 { None } else { Some(1000) }, false);
// If Alice broadcasted but Bob doesn't know yet, here he prepares to tell her about the preimage.
if !go_onchain_before_fulfill && broadcast_alice {
let events = nodes[1].node.get_and_clear_pending_msg_events();
connect_block(&nodes[1], &block);
// ChannelManager only polls chain::Watch::release_pending_monitor_events when we
- // probe it for events, so we probe non-message events here (which should still end up empty):
- assert_eq!(nodes[1].node.get_and_clear_pending_events().len(), 0);
+ // probe it for events, so we probe non-message events here (which should just be the
+ // PaymentForwarded event).
+ expect_payment_forwarded!(nodes[1], Some(1000), true);
} else {
// Confirm the timeout tx and check that we fail the HTLC backwards
let block = Block {
/// The outputs which you should store as spendable by you.
outputs: Vec<SpendableOutputDescriptor>,
},
+ /// This event is generated when a payment has been successfully forwarded through us and a
+ /// forwarding fee has been earned.
+ PaymentForwarded {
+ /// The fee, in milli-satoshis, which was earned as a result of the payment.
+ ///
+ /// Note that if we force-closed the channel over which we forwarded an HTLC while the HTLC
+ /// was pending, the amount the next hop claimed will have been rounded down to the nearest
+ /// whole satoshi. Thus, the fee calculated here may be higher than expected as we still
+ /// claimed the full value in millisatoshis from the source. In this case,
+ /// `claim_from_onchain_tx` will be set.
+ ///
+ /// If the channel which sent us the payment has been force-closed, we will claim the funds
+ /// via an on-chain transaction. In that case we do not yet know the on-chain transaction
+ /// fees which we will spend and will instead set this to `None`. It is possible duplicate
+ /// `PaymentForwarded` events are generated for the same payment iff `fee_earned_msat` is
+ /// `None`.
+ fee_earned_msat: Option<u64>,
+ /// If this is `true`, the forwarded HTLC was claimed by our counterparty via an on-chain
+ /// transaction.
+ claim_from_onchain_tx: bool,
+ },
}
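
To make the fee semantics concrete, here is an illustrative handler for the
new variant, with hypothetical numbers. Suppose we were offered 5_001_500 msat
in and forwarded 5_000_500 msat out, a 1_000 msat fee. If the outbound channel
is then force-closed, the next hop can only claim
floor(5_000_500 / 1_000) = 5_000 sat on chain, so we report `fee_earned_msat`
of 5_001_500 - 5_000_000 = 1_500:

```rust
use lightning::util::events::Event;

// Sketch of consuming the new variant in an application event handler.
fn handle_event(event: &Event) {
	match event {
		Event::PaymentForwarded { fee_earned_msat: Some(fee), claim_from_onchain_tx } => {
			// `fee` may exceed the negotiated fee when `claim_from_onchain_tx`
			// is set, due to the rounding described above.
			println!("forwarded a payment, earning {} msat (on-chain claim: {})",
				fee, claim_from_onchain_tx);
		},
		Event::PaymentForwarded { fee_earned_msat: None, .. } => {
			// The inbound channel was force-closed, so the fee is not yet
			// known; this event may also be a duplicate.
		},
		_ => {},
	}
}
```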
impl Writeable for Event {
(0, VecWriteWrapper(outputs), required),
});
},
+ &Event::PaymentForwarded { fee_earned_msat, claim_from_onchain_tx } => {
+ 7u8.write(writer)?;
+ write_tlv_fields!(writer, {
+ (0, fee_earned_msat, option),
+ (2, claim_from_onchain_tx, required),
+ });
+ },
}
Ok(())
}
};
f()
},
+ 7u8 => {
+ let f = || {
+ let mut fee_earned_msat = None;
+ let mut claim_from_onchain_tx = false;
+ read_tlv_fields!(reader, {
+ (0, fee_earned_msat, option),
+ (2, claim_from_onchain_tx, required),
+ });
+ Ok(Some(Event::PaymentForwarded { fee_earned_msat, claim_from_onchain_tx }))
+ };
+ f()
+ },
// Versions prior to 0.0.100 did not ignore odd types, instead returning InvalidValue.
x if x % 2 == 1 => Ok(None),
_ => Err(msgs::DecodeError::InvalidValue)