// Note that this may be an outbound SCID alias for the associated channel.
prev_short_channel_id: u64,
prev_htlc_id: u64,
+ prev_channel_id: ChannelId,
prev_funding_outpoint: OutPoint,
prev_user_channel_id: u128,
}
incoming_packet_shared_secret: [u8; 32],
phantom_shared_secret: Option<[u8; 32]>,
blinded_failure: Option<BlindedFailure>,
+ channel_id: ChannelId,
// This field is consumed by `claim_funds_from_hop()` when updating a force-closed backwards
// channel with a preimage provided by the forward channel.
impl From<&ClaimableHTLC> for events::ClaimedHTLC {
fn from(val: &ClaimableHTLC) -> Self {
events::ClaimedHTLC {
- channel_id: val.prev_hop.outpoint.to_channel_id(),
+ channel_id: val.prev_hop.channel_id,
user_channel_id: val.prev_hop.user_channel_id.unwrap_or(0),
cltv_expiry: val.cltv_expiry,
value_msat: val.value,
///
/// Note that any such events are lost on shutdown, so in general they must be updates which
/// are regenerated on startup.
- ClosedMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
+ ClosedMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelId, ChannelMonitorUpdate)),
/// Handle a ChannelMonitorUpdate which may or may not close the channel and may unblock the
/// channel to continue normal operation.
///
MonitorUpdateRegeneratedOnStartup {
counterparty_node_id: PublicKey,
funding_txo: OutPoint,
+ channel_id: ChannelId,
update: ChannelMonitorUpdate
},
/// Some [`ChannelMonitorUpdate`] (s) completed before we were serialized but we still have
/// outbound edge.
EmitEventAndFreeOtherChannel {
event: events::Event,
- downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, RAAMonitorUpdateBlockingAction)>,
+ downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, ChannelId, RAAMonitorUpdateBlockingAction)>,
},
/// Indicates we should immediately resume the operation of another channel, unless there is
/// some other reason why the channel is blocked. In practice this simply means immediately
downstream_counterparty_node_id: PublicKey,
downstream_funding_outpoint: OutPoint,
blocking_action: RAAMonitorUpdateBlockingAction,
+ downstream_channel_id: ChannelId,
},
}
(0, downstream_counterparty_node_id, required),
(2, downstream_funding_outpoint, required),
(4, blocking_action, required),
+ // Note that by the time we get past the required read above, downstream_funding_outpoint will be
+ // filled in, so we can safely unwrap it here.
+ (5, downstream_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(downstream_funding_outpoint.0.unwrap()))),
},
(2, EmitEventAndFreeOtherChannel) => {
(0, event, upgradable_required),
ReleaseRAAChannelMonitorUpdate {
counterparty_node_id: PublicKey,
channel_funding_outpoint: OutPoint,
+ channel_id: ChannelId,
},
}
impl_writeable_tlv_based_enum!(EventCompletionAction,
(0, ReleaseRAAChannelMonitorUpdate) => {
(0, channel_funding_outpoint, required),
(2, counterparty_node_id, required),
+ // Note that by the time we get past the required read above, channel_funding_outpoint will be
+ // filled in, so we can safely unwrap it here.
+ (3, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(channel_funding_outpoint.0.unwrap()))),
};
);
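For context on why the `default_value` reads above can rebuild a missing `channel_id` from the funding outpoint: a v1 channel ID is, per BOLT 2, the funding txid with the big-endian funding output index XORed into its final two bytes. A minimal sketch of that derivation (an assumed helper, not the actual `ChannelId` implementation in this diff):

// Illustrative sketch only; byte ordering of the txid is assumed to match whatever
// `ChannelId::v1_from_funding_outpoint` uses internally.
fn v1_channel_id_sketch(funding_txid_bytes: [u8; 32], output_index: u16) -> [u8; 32] {
    let mut id = funding_txid_bytes;
    // XOR the big-endian output index into the last two bytes of the txid.
    id[30] ^= (output_index >> 8) as u8;
    id[31] ^= (output_index & 0xff) as u8;
    id
}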
impl RAAMonitorUpdateBlockingAction {
fn from_prev_hop_data(prev_hop: &HTLCPreviousHopData) -> Self {
Self::ForwardedPaymentInboundClaim {
- channel_id: prev_hop.outpoint.to_channel_id(),
+ channel_id: prev_hop.channel_id,
htlc_id: prev_hop.htlc_id,
}
}
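For context (an illustrative sketch mirroring the deserialization path later in this diff, with `prev_hop` and `downstream_channel_id` as assumed local names): the action built here identifies the inbound-edge claim by the previous hop's `channel_id` and `htlc_id`, and is stored under the downstream channel's `ChannelId` in `actions_blocking_raa_monitor_updates`, holding that channel's RAA-triggered monitor update until the inbound edge's preimage is durably persisted.

// Sketch, not the PR's code: register the blocker against the channel being held back.
let blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&prev_hop);
peer_state.actions_blocking_raa_monitor_updates
    .entry(downstream_channel_id)
    .or_insert_with(Vec::new)
    .push(blocker);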
pub counterparty: ChannelCounterparty,
/// The Channel's funding transaction output, if we've negotiated the funding transaction with
/// our counterparty already.
- ///
- /// Note that, if this has been set, `channel_id` will be equivalent to
- /// `funding_txo.unwrap().to_channel_id()`.
pub funding_txo: Option<OutPoint>,
/// The features which this channel operates with. See individual features for more info.
///
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
}
- if let Some((_, funding_txo, monitor_update)) = shutdown_res.monitor_update {
+ if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update {
// There isn't anything we can do if we get an update failure - we're already
// force-closing. The monitor update on the required in-memory copy should broadcast
// the latest local state, which is the best we can do anyway. Thus, it is safe to
}
let outpoint = OutPoint { txid: tx.txid(), index: output_index.unwrap() };
if let Some(funding_batch_state) = funding_batch_state.as_mut() {
- funding_batch_state.push((outpoint.to_channel_id(), *counterparty_node_id, false));
+ // TODO(dual_funding): We only do batch funding for V1 channels at the moment, but we'll probably
+ // need to fix this somehow to not rely on using the outpoint for the channel ID if we
+ // want to support V2 batching here as well.
+ funding_batch_state.push((ChannelId::v1_from_funding_outpoint(outpoint), *counterparty_node_id, false));
}
Ok(outpoint)
})
});
}
}
+ mem::drop(funding_batch_states);
for shutdown_result in shutdown_results.drain(..) {
self.finish_close_channel(shutdown_result);
}
let mut per_source_pending_forward = [(
payment.prev_short_channel_id,
payment.prev_funding_outpoint,
+ payment.prev_channel_id,
payment.prev_user_channel_id,
vec![(pending_htlc_info, payment.prev_htlc_id)]
)];
short_channel_id: payment.prev_short_channel_id,
user_channel_id: Some(payment.prev_user_channel_id),
outpoint: payment.prev_funding_outpoint,
+ channel_id: payment.prev_channel_id,
htlc_id: payment.prev_htlc_id,
incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret,
phantom_shared_secret: None,
let mut new_events = VecDeque::new();
let mut failed_forwards = Vec::new();
- let mut phantom_receives: Vec<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
+ let mut phantom_receives: Vec<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
{
let mut forward_htlcs = HashMap::new();
mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());
for forward_info in pending_forwards.drain(..) {
match forward_info {
HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
- prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
- forward_info: PendingHTLCInfo {
+ prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
+ prev_user_channel_id, forward_info: PendingHTLCInfo {
routing, incoming_shared_secret, payment_hash, outgoing_amt_msat,
outgoing_cltv_value, ..
}
}) => {
macro_rules! failure_handler {
($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
- let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_funding_outpoint.to_channel_id()));
+ let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_channel_id));
log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
user_channel_id: Some(prev_user_channel_id),
+ channel_id: prev_channel_id,
outpoint: prev_funding_outpoint,
htlc_id: prev_htlc_id,
incoming_packet_shared_secret: incoming_shared_secret,
outgoing_cltv_value, Some(phantom_shared_secret), false, None,
current_height, self.default_configuration.accept_mpp_keysend)
{
- Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, vec![(info, prev_htlc_id)])),
+ Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, vec![(info, prev_htlc_id)])),
Err(InboundHTLCErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
}
},
for forward_info in pending_forwards.drain(..) {
let queue_fail_htlc_res = match forward_info {
HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
- prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
- forward_info: PendingHTLCInfo {
+ prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
+ prev_user_channel_id, forward_info: PendingHTLCInfo {
incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
routing: PendingHTLCRouting::Forward {
onion_packet, blinded, ..
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
user_channel_id: Some(prev_user_channel_id),
+ channel_id: prev_channel_id,
outpoint: prev_funding_outpoint,
htlc_id: prev_htlc_id,
incoming_packet_shared_secret: incoming_shared_secret,
'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) {
match forward_info {
HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
- prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
- forward_info: PendingHTLCInfo {
+ prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
+ prev_user_channel_id, forward_info: PendingHTLCInfo {
routing, incoming_shared_secret, payment_hash, incoming_amt_msat, outgoing_amt_msat,
skimmed_fee_msat, ..
}
prev_hop: HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
user_channel_id: Some(prev_user_channel_id),
+ channel_id: prev_channel_id,
outpoint: prev_funding_outpoint,
htlc_id: prev_htlc_id,
incoming_packet_shared_secret: incoming_shared_secret,
failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: $htlc.prev_hop.short_channel_id,
user_channel_id: $htlc.prev_hop.user_channel_id,
+ channel_id: prev_channel_id,
outpoint: prev_funding_outpoint,
htlc_id: $htlc.prev_hop.htlc_id,
incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
#[allow(unused_assignments)] {
committed_to_claimable = true;
}
- let prev_channel_id = prev_funding_outpoint.to_channel_id();
htlcs.push(claimable_htlc);
let amount_msat = htlcs.iter().map(|htlc| htlc.value).sum();
htlcs.iter_mut().for_each(|htlc| htlc.total_value_received = Some(amount_msat));
for event in background_events.drain(..) {
match event {
- BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
+ BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, _channel_id, update)) => {
// The channel has already been closed, so no use bothering to care about the
// monitor updating completing.
let _ = self.chain_monitor.update_channel(funding_txo, &update);
},
- BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update } => {
+ BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => {
let mut updated_chan = false;
{
let per_peer_state = self.per_peer_state.read().unwrap();
if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
+ match peer_state.channel_by_id.entry(channel_id) {
hash_map::Entry::Occupied(mut chan_phase) => {
if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
updated_chan = true;
},
HTLCSource::PreviousHopData(HTLCPreviousHopData {
ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret,
- ref phantom_shared_secret, ref outpoint, ref blinded_failure, ..
+ ref phantom_shared_secret, outpoint: _, ref blinded_failure, ref channel_id, ..
}) => {
log_trace!(
- WithContext::from(&self.logger, None, Some(outpoint.to_channel_id())),
+ WithContext::from(&self.logger, None, Some(*channel_id)),
"Failing {}HTLC with payment_hash {} backwards from us: {:?}",
if blinded_failure.is_some() { "blinded " } else { "" }, &payment_hash, onion_error
);
if push_forward_ev { self.push_pending_forwards_ev(); }
let mut pending_events = self.pending_events.lock().unwrap();
pending_events.push_back((events::Event::HTLCHandlingFailed {
- prev_channel_id: outpoint.to_channel_id(),
+ prev_channel_id: *channel_id,
failed_next_destination: destination,
}, None));
},
}
if valid_mpp {
for htlc in sources.drain(..) {
- let prev_hop_chan_id = htlc.prev_hop.outpoint.to_channel_id();
+ let prev_hop_chan_id = htlc.prev_hop.channel_id;
if let Err((pk, err)) = self.claim_funds_from_hop(
htlc.prev_hop, payment_preimage,
|_, definitely_duplicate| {
{
let per_peer_state = self.per_peer_state.read().unwrap();
- let chan_id = prev_hop.outpoint.to_channel_id();
+ let chan_id = prev_hop.channel_id;
let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()),
None => None
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id,
funding_txo: prev_hop.outpoint,
+ channel_id: prev_hop.channel_id,
update: monitor_update.clone(),
});
}
log_trace!(logger, "Completing monitor update completion action for channel {} as claim was redundant: {:?}",
chan_id, action);
- let (node_id, funding_outpoint, blocker) =
+ let (node_id, _funding_outpoint, channel_id, blocker) =
if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
downstream_counterparty_node_id: node_id,
downstream_funding_outpoint: funding_outpoint,
- blocking_action: blocker,
+ blocking_action: blocker, downstream_channel_id: channel_id,
} = action {
- (node_id, funding_outpoint, blocker)
+ (node_id, funding_outpoint, channel_id, blocker)
} else {
debug_assert!(false,
"Duplicate claims should always free another channel immediately");
let mut peer_state = peer_state_mtx.lock().unwrap();
if let Some(blockers) = peer_state
.actions_blocking_raa_monitor_updates
- .get_mut(&funding_outpoint.to_channel_id())
+ .get_mut(&channel_id)
{
let mut found_blocker = false;
blockers.retain(|iter| {
updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
payment_preimage,
}],
+ channel_id: Some(prev_hop.channel_id),
};
if !during_init {
// with a preimage we *must* somehow manage to propagate it to the upstream
// channel, or we must have an ability to receive the same event and try
// again on restart.
- log_error!(WithContext::from(&self.logger, None, Some(prev_hop.outpoint.to_channel_id())), "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+ log_error!(WithContext::from(&self.logger, None, Some(prev_hop.channel_id)),
+ "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
payment_preimage, update_res);
}
} else {
// complete the monitor update completion action from `completion_action`.
self.pending_background_events.lock().unwrap().push(
BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((
- prev_hop.outpoint, preimage_update,
+ prev_hop.outpoint, prev_hop.channel_id, preimage_update,
)));
}
// Note that we do process the completion action here. This totally could be a
}
fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage,
- forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, startup_replay: bool,
- next_channel_counterparty_node_id: Option<PublicKey>, next_channel_outpoint: OutPoint
+ forwarded_htlc_value_msat: Option<u64>, skimmed_fee_msat: Option<u64>, from_onchain: bool,
+ startup_replay: bool, next_channel_counterparty_node_id: Option<PublicKey>,
+ next_channel_outpoint: OutPoint, next_channel_id: ChannelId,
) {
match source {
HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
debug_assert_eq!(pubkey, path.hops[0].pubkey);
}
let ev_completion_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
- channel_funding_outpoint: next_channel_outpoint,
+ channel_funding_outpoint: next_channel_outpoint, channel_id: next_channel_id,
counterparty_node_id: path.hops[0].pubkey,
};
self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage,
&self.logger);
},
HTLCSource::PreviousHopData(hop_data) => {
- let prev_outpoint = hop_data.outpoint;
+ let prev_channel_id = hop_data.channel_id;
let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
#[cfg(debug_assertions)]
let claiming_chan_funding_outpoint = hop_data.outpoint;
+ #[cfg(debug_assertions)]
+ let claiming_channel_id = hop_data.channel_id;
let res = self.claim_funds_from_hop(hop_data, payment_preimage,
|htlc_claim_value_msat, definitely_duplicate| {
let chan_to_release =
if let Some(node_id) = next_channel_counterparty_node_id {
- Some((node_id, next_channel_outpoint, completed_blocker))
+ Some((node_id, next_channel_outpoint, next_channel_id, completed_blocker))
} else {
// We can only get `None` here if we are processing a
// `ChannelMonitor`-originated event, in which case we
},
// or the channel we'd unblock is already closed,
BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup(
- (funding_txo, monitor_update)
+ (funding_txo, _channel_id, monitor_update)
) => {
if *funding_txo == next_channel_outpoint {
assert_eq!(monitor_update.updates.len(), 1);
BackgroundEvent::MonitorUpdatesComplete {
channel_id, ..
} =>
- *channel_id == claiming_chan_funding_outpoint.to_channel_id(),
+ *channel_id == claiming_channel_id,
}
}), "{:?}", *background_events);
}
Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
downstream_counterparty_node_id: other_chan.0,
downstream_funding_outpoint: other_chan.1,
- blocking_action: other_chan.2,
+ downstream_channel_id: other_chan.2,
+ blocking_action: other_chan.3,
})
} else { None }
} else {
- let fee_earned_msat = if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
+ let total_fee_earned_msat = if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
if let Some(claimed_htlc_value) = htlc_claim_value_msat {
Some(claimed_htlc_value - forwarded_htlc_value)
} else { None }
} else { None };
+ debug_assert!(skimmed_fee_msat <= total_fee_earned_msat,
+ "skimmed_fee_msat must always be included in total_fee_earned_msat");
Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
event: events::Event::PaymentForwarded {
- fee_earned_msat,
+ total_fee_earned_msat,
claim_from_onchain_tx: from_onchain,
- prev_channel_id: Some(prev_outpoint.to_channel_id()),
- next_channel_id: Some(next_channel_outpoint.to_channel_id()),
+ prev_channel_id: Some(prev_channel_id),
+ next_channel_id: Some(next_channel_id),
outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
+ skimmed_fee_msat,
},
downstream_counterparty_and_funding_outpoint: chan_to_release,
})
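A quick worked example of the renamed and added fee fields (numbers invented for illustration only):

// Inbound HTLC claimed for 101_000 msat, 100_000 msat forwarded outbound, so the hop earned
// 1_000 msat in total. If 300 msat of that was skimmed from the payment amount, the
// debug_assert above holds since Some(300) <= Some(1_000).
let total_fee_earned_msat: Option<u64> = Some(101_000 - 100_000);
let skimmed_fee_msat: Option<u64> = Some(300);
debug_assert!(skimmed_fee_msat <= total_fee_earned_msat);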
event, downstream_counterparty_and_funding_outpoint
} => {
self.pending_events.lock().unwrap().push_back((event, None));
- if let Some((node_id, funding_outpoint, blocker)) = downstream_counterparty_and_funding_outpoint {
- self.handle_monitor_update_release(node_id, funding_outpoint, Some(blocker));
+ if let Some((node_id, funding_outpoint, channel_id, blocker)) = downstream_counterparty_and_funding_outpoint {
+ self.handle_monitor_update_release(node_id, funding_outpoint, channel_id, Some(blocker));
}
},
MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
- downstream_counterparty_node_id, downstream_funding_outpoint, blocking_action,
+ downstream_counterparty_node_id, downstream_funding_outpoint, downstream_channel_id, blocking_action,
} => {
self.handle_monitor_update_release(
downstream_counterparty_node_id,
downstream_funding_outpoint,
+ downstream_channel_id,
Some(blocking_action),
);
},
commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder,
pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
- -> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> {
+ -> Option<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> {
let logger = WithChannelContext::from(&self.logger, &channel.context);
log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
&channel.context.channel_id(),
let counterparty_node_id = channel.context.get_counterparty_node_id();
if !pending_forwards.is_empty() {
htlc_forwards = Some((channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias()),
- channel.context.get_funding_txo().unwrap(), channel.context.get_user_id(), pending_forwards));
+ channel.context.get_funding_txo().unwrap(), channel.context.channel_id(), channel.context.get_user_id(), pending_forwards));
}
if let Some(msg) = channel_ready {
htlc_forwards
}
- fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
+ fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
let counterparty_node_id = match counterparty_node_id {
peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
let channel =
- if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&funding_txo.to_channel_id()) {
+ if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(channel_id) {
chan
} else {
let update_actions = peer_state.monitor_update_blocked_actions
- .remove(&funding_txo.to_channel_id()).unwrap_or(Vec::new());
+ .remove(channel_id).unwrap_or(Vec::new());
mem::drop(peer_state_lock);
mem::drop(per_peer_state);
self.handle_monitor_update_completion_actions(update_actions);
fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
let funding_txo;
- let (htlc_source, forwarded_htlc_value) = {
+ let (htlc_source, forwarded_htlc_value, skimmed_fee_msat) = {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
};
- self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, false, Some(*counterparty_node_id), funding_txo);
+ self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(),
+ Some(forwarded_htlc_value), skimmed_fee_msat, false, false, Some(*counterparty_node_id),
+ funding_txo, msg.channel_id
+ );
+
Ok(())
}
}
#[inline]
- fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)]) {
- for &mut (prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
+ fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) {
+ for &mut (prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
let mut push_forward_event = false;
let mut new_intercept_events = VecDeque::new();
let mut failed_intercept_forwards = Vec::new();
match forward_htlcs.entry(scid) {
hash_map::Entry::Occupied(mut entry) => {
entry.get_mut().push(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
- prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info }));
+ prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info }));
},
hash_map::Entry::Vacant(entry) => {
if !is_our_scid && forward_info.incoming_amt_msat.is_some() &&
intercept_id
}, None));
entry.insert(PendingAddHTLCInfo {
- prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info });
+ prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info });
},
hash_map::Entry::Occupied(_) => {
- let logger = WithContext::from(&self.logger, None, Some(prev_funding_outpoint.to_channel_id()));
+ let logger = WithContext::from(&self.logger, None, Some(prev_channel_id));
log_info!(logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid);
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
user_channel_id: Some(prev_user_channel_id),
outpoint: prev_funding_outpoint,
+ channel_id: prev_channel_id,
htlc_id: prev_htlc_id,
incoming_packet_shared_secret: forward_info.incoming_shared_secret,
phantom_shared_secret: None,
push_forward_event = true;
}
entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
- prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info })));
+ prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info })));
}
}
}
/// the [`ChannelMonitorUpdate`] in question.
fn raa_monitor_updates_held(&self,
actions_blocking_raa_monitor_updates: &BTreeMap<ChannelId, Vec<RAAMonitorUpdateBlockingAction>>,
- channel_funding_outpoint: OutPoint, counterparty_node_id: PublicKey
+ channel_funding_outpoint: OutPoint, channel_id: ChannelId, counterparty_node_id: PublicKey
) -> bool {
actions_blocking_raa_monitor_updates
- .get(&channel_funding_outpoint.to_channel_id()).map(|v| !v.is_empty()).unwrap_or(false)
+ .get(&channel_id).map(|v| !v.is_empty()).unwrap_or(false)
|| self.pending_events.lock().unwrap().iter().any(|(_, action)| {
action == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
channel_funding_outpoint,
+ channel_id,
counterparty_node_id,
})
})
if let Some(chan) = peer_state.channel_by_id.get(&channel_id) {
return self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
- chan.context().get_funding_txo().unwrap(), counterparty_node_id);
+ chan.context().get_funding_txo().unwrap(), channel_id, counterparty_node_id);
}
}
false
let funding_txo_opt = chan.context.get_funding_txo();
let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt {
self.raa_monitor_updates_held(
- &peer_state.actions_blocking_raa_monitor_updates, funding_txo,
+ &peer_state.actions_blocking_raa_monitor_updates, funding_txo, msg.channel_id,
*counterparty_node_id)
} else { false };
let (htlcs_to_fail, monitor_update_opt) = try_chan_phase_entry!(self,
let mut failed_channels = Vec::new();
let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
let has_pending_monitor_events = !pending_monitor_events.is_empty();
- for (funding_outpoint, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) {
+ for (funding_outpoint, channel_id, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) {
for monitor_event in monitor_events.drain(..) {
match monitor_event {
MonitorEvent::HTLCEvent(htlc_update) => {
- let logger = WithContext::from(&self.logger, counterparty_node_id, Some(funding_outpoint.to_channel_id()));
+ let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id));
if let Some(preimage) = htlc_update.payment_preimage {
log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
- self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, false, counterparty_node_id, funding_outpoint);
+ self.claim_funds_internal(htlc_update.source, preimage,
+ htlc_update.htlc_value_satoshis.map(|v| v * 1000), None, true,
+ false, counterparty_node_id, funding_outpoint, channel_id);
} else {
log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
- let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
+ let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id };
let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
}
},
- MonitorEvent::HolderForceClosed(funding_outpoint) => {
+ MonitorEvent::HolderForceClosed(_funding_outpoint) => {
let counterparty_node_id_opt = match counterparty_node_id {
Some(cp_id) => Some(cp_id),
None => {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let pending_msg_events = &mut peer_state.pending_msg_events;
- if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) {
+ if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id) {
if let ChannelPhase::Funded(mut chan) = remove_channel_phase!(self, chan_phase_entry) {
failed_channels.push(chan.context.force_shutdown(false, ClosureReason::HolderForceClosed));
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
}
}
},
- MonitorEvent::Completed { funding_txo, monitor_update_id } => {
- self.channel_monitor_updated(&funding_txo, monitor_update_id, counterparty_node_id.as_ref());
+ MonitorEvent::Completed { funding_txo, channel_id, monitor_update_id } => {
+ self.channel_monitor_updated(&funding_txo, &channel_id, monitor_update_id, counterparty_node_id.as_ref());
},
}
}
// Channel::force_shutdown tries to make us do) as we may still be in initialization,
// so we track the update internally and handle it when the user next calls
// timer_tick_occurred, guaranteeing we're running normally.
- if let Some((counterparty_node_id, funding_txo, update)) = failure.monitor_update.take() {
+ if let Some((counterparty_node_id, funding_txo, channel_id, update)) = failure.monitor_update.take() {
assert_eq!(update.updates.len(), 1);
if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
assert!(should_broadcast);
} else { unreachable!(); }
self.pending_background_events.lock().unwrap().push(
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
- counterparty_node_id, funding_txo, update
+ counterparty_node_id, funding_txo, update, channel_id,
});
}
self.finish_close_channel(failure);
let payment_paths = self.create_blinded_payment_paths(amount_msats, payment_secret)
.map_err(|_| Bolt12SemanticError::MissingPaths)?;
- #[cfg(not(feature = "no-std"))]
+ #[cfg(feature = "std")]
let builder = refund.respond_using_derived_keys(
payment_paths, payment_hash, expanded_key, entropy
)?;
- #[cfg(feature = "no-std")]
+ #[cfg(not(feature = "std"))]
let created_at = Duration::from_secs(
self.highest_seen_timestamp.load(Ordering::Acquire) as u64
);
- #[cfg(feature = "no-std")]
+ #[cfg(not(feature = "std"))]
let builder = refund.respond_using_derived_keys_no_std(
payment_paths, payment_hash, created_at, expanded_key, entropy
)?;
/// [`Event`] being handled) completes, this should be called to restore the channel to normal
/// operation. It will double-check that nothing *else* is also blocking the same channel from
/// making progress and then let any blocked [`ChannelMonitorUpdate`]s fly.
- fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint, mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
+ fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey,
+ channel_funding_outpoint: OutPoint, channel_id: ChannelId,
+ mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
+
let logger = WithContext::from(
- &self.logger, Some(counterparty_node_id), Some(channel_funding_outpoint.to_channel_id())
+ &self.logger, Some(counterparty_node_id), Some(channel_id),
);
loop {
let per_peer_state = self.per_peer_state.read().unwrap();
if let Some(blocker) = completed_blocker.take() {
// Only do this on the first iteration of the loop.
if let Some(blockers) = peer_state.actions_blocking_raa_monitor_updates
- .get_mut(&channel_funding_outpoint.to_channel_id())
+ .get_mut(&channel_id)
{
blockers.retain(|iter| iter != &blocker);
}
}
if self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
- channel_funding_outpoint, counterparty_node_id) {
+ channel_funding_outpoint, channel_id, counterparty_node_id) {
// Check that, while holding the peer lock, we don't have anything else
// blocking monitor updates for this channel. If we do, release the monitor
// update(s) when those blockers complete.
log_trace!(logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first",
- &channel_funding_outpoint.to_channel_id());
+ &channel_id);
break;
}
- if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) {
+ if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(
+ channel_id) {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
debug_assert_eq!(chan.context.get_funding_txo().unwrap(), channel_funding_outpoint);
if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor",
- channel_funding_outpoint.to_channel_id());
+ channel_id);
handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
peer_state_lck, peer_state, per_peer_state, chan);
if further_update_exists {
}
} else {
log_trace!(logger, "Unlocked monitor updating for channel {} without monitors to update",
- channel_funding_outpoint.to_channel_id());
+ channel_id);
}
}
}
for action in actions {
match action {
EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
- channel_funding_outpoint, counterparty_node_id
+ channel_funding_outpoint, channel_id, counterparty_node_id
} => {
- self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, None);
+ self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, channel_id, None);
}
}
}
incoming_packet_shared_secret: htlc.forward_info.incoming_shared_secret,
phantom_shared_secret: None,
outpoint: htlc.prev_funding_outpoint,
+ channel_id: htlc.prev_channel_id,
blinded_failure: htlc.forward_info.routing.blinded_failure(),
});
HTLCFailReason::from_failure_code(0x2000 | 2),
HTLCDestination::InvalidForward { requested_forward_scid }));
let logger = WithContext::from(
- &self.logger, None, Some(htlc.prev_funding_outpoint.to_channel_id())
+ &self.logger, None, Some(htlc.prev_channel_id)
);
log_trace!(logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid);
false
},
};
- #[cfg(feature = "no-std")]
+ #[cfg(not(feature = "std"))]
let created_at = Duration::from_secs(
self.highest_seen_timestamp.load(Ordering::Acquire) as u64
);
if invoice_request.keys.is_some() {
- #[cfg(not(feature = "no-std"))]
+ #[cfg(feature = "std")]
let builder = invoice_request.respond_using_derived_keys(
payment_paths, payment_hash
);
- #[cfg(feature = "no-std")]
+ #[cfg(not(feature = "std"))]
let builder = invoice_request.respond_using_derived_keys_no_std(
payment_paths, payment_hash, created_at
);
Err(error) => Some(OffersMessage::InvoiceError(error.into())),
}
} else {
- #[cfg(not(feature = "no-std"))]
+ #[cfg(feature = "std")]
let builder = invoice_request.respond_with(payment_paths, payment_hash);
- #[cfg(feature = "no-std")]
+ #[cfg(not(feature = "std"))]
let builder = invoice_request.respond_with_no_std(
payment_paths, payment_hash, created_at
);
(4, htlc_id, required),
(6, incoming_packet_shared_secret, required),
(7, user_channel_id, option),
+ // Note that by the time we get past the required read for type 2 above, outpoint will be
+ // filled in, so we can safely unwrap it here.
+ (9, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(outpoint.0.unwrap()))),
});
impl Writeable for ClaimableHTLC {
(2, prev_short_channel_id, required),
(4, prev_htlc_id, required),
(6, prev_funding_outpoint, required),
+ // Note that by the time we get past the required read for type 6 above, prev_funding_outpoint will be
+ // filled in, so we can safely unwrap it here.
+ (7, prev_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(prev_funding_outpoint.0.unwrap()))),
});
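The `default_value` entries above rely on read-side behavior worth spelling out: the new fields use odd TLV types, so readers from before this change simply skip them, while a newer reader that finds the field absent rebuilds the ID from the funding outpoint it has already read. A sketch of that fallback under assumed names (`maybe_read_channel_id` is a hypothetical stand-in for the optional TLV value, not the macro's actual expansion):

let prev_channel_id = maybe_read_channel_id
    .unwrap_or_else(|| ChannelId::v1_from_funding_outpoint(prev_funding_outpoint));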
impl Writeable for HTLCForwardInfo {
let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut channel_closures = VecDeque::new();
let mut close_background_events = Vec::new();
+ let mut funding_txo_to_channel_id = HashMap::with_capacity(channel_count as usize);
for _ in 0..channel_count {
let mut channel: Channel<SP> = Channel::read(reader, (
&args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
))?;
let logger = WithChannelContext::from(&args.logger, &channel.context);
let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
+ funding_txo_to_channel_id.insert(funding_txo, channel.context.channel_id());
funding_txo_set.insert(funding_txo.clone());
if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
return Err(DecodeError::InvalidValue);
}
- if let Some((counterparty_node_id, funding_txo, update)) = shutdown_result.monitor_update {
+ if let Some((counterparty_node_id, funding_txo, channel_id, update)) = shutdown_result.monitor_update {
close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
- counterparty_node_id, funding_txo, update
+ counterparty_node_id, funding_txo, channel_id, update
});
}
failed_htlcs.append(&mut shutdown_result.dropped_outbound_htlcs);
for (funding_txo, monitor) in args.channel_monitors.iter() {
if !funding_txo_set.contains(funding_txo) {
let logger = WithChannelMonitor::from(&args.logger, monitor);
+ let channel_id = monitor.channel_id();
log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed",
- &funding_txo.to_channel_id());
+ &channel_id);
let monitor_update = ChannelMonitorUpdate {
update_id: CLOSED_CHANNEL_UPDATE_ID,
counterparty_node_id: None,
updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
+ channel_id: Some(monitor.channel_id()),
};
- close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
+ close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, channel_id, monitor_update)));
}
}
$chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id());
for update in $chan_in_flight_upds.iter() {
log_trace!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}",
- update.update_id, $channel_info_log, &$funding_txo.to_channel_id());
+ update.update_id, $channel_info_log, &$monitor.channel_id());
max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id);
pending_background_events.push(
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id: $counterparty_node_id,
funding_txo: $funding_txo,
+ channel_id: $monitor.channel_id(),
update: update.clone(),
});
}
pending_background_events.push(
BackgroundEvent::MonitorUpdatesComplete {
counterparty_node_id: $counterparty_node_id,
- channel_id: $funding_txo.to_channel_id(),
+ channel_id: $monitor.channel_id(),
});
}
if $peer_state.in_flight_monitor_updates.insert($funding_txo, $chan_in_flight_upds).is_some() {
if let Some(in_flight_upds) = in_flight_monitor_updates {
for ((counterparty_id, funding_txo), mut chan_in_flight_updates) in in_flight_upds {
- let logger = WithContext::from(&args.logger, Some(counterparty_id), Some(funding_txo.to_channel_id()));
+ let channel_id = funding_txo_to_channel_id.get(&funding_txo).copied();
+ let logger = WithContext::from(&args.logger, Some(counterparty_id), channel_id);
if let Some(monitor) = args.channel_monitors.get(&funding_txo) {
// Now that we've removed all the in-flight monitor updates for channels that are
// still open, we need to replay any monitor updates that are for closed channels,
funding_txo, monitor, peer_state, logger, "closed ");
} else {
log_error!(logger, "A ChannelMonitor is missing even though we have in-flight updates for it! This indicates a potentially-critical violation of the chain::Watch API!");
- log_error!(logger, " The ChannelMonitor for channel {} is missing.",
- &funding_txo.to_channel_id());
+ log_error!(logger, " The ChannelMonitor for channel {} is missing.", if let Some(channel_id) =
+ channel_id { channel_id.to_string() } else { format!("with outpoint {}", funding_txo) } );
log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
if pending_forward_matches_htlc(&htlc_info) {
log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
- &htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id());
+ &htlc.payment_hash, &monitor.channel_id());
false
} else { true }
} else { true }
pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
if pending_forward_matches_htlc(&htlc_info) {
log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
- &htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id());
+ &htlc.payment_hash, &monitor.channel_id());
pending_events_read.retain(|(event, _)| {
if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
intercepted_id != ev_id
let compl_action =
EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
channel_funding_outpoint: monitor.get_funding_txo().0,
+ channel_id: monitor.channel_id(),
counterparty_node_id: path.hops[0].pubkey,
};
pending_outbounds.claim_htlc(payment_id, preimage, session_priv,
// channel_id -> peer map entry).
counterparty_opt.is_none(),
counterparty_opt.cloned().or(monitor.get_counterparty_node_id()),
- monitor.get_funding_txo().0))
+ monitor.get_funding_txo().0, monitor.channel_id()))
} else { None }
} else {
// If it was an outbound payment, we've handled it above - if a preimage
// this channel as well. On the flip side, there's no harm in restarting
// without the new monitor persisted - we'll end up right back here on
// restart.
- let previous_channel_id = claimable_htlc.prev_hop.outpoint.to_channel_id();
+ let previous_channel_id = claimable_htlc.prev_hop.channel_id;
if let Some(peer_node_id) = outpoint_to_peer.get(&claimable_htlc.prev_hop.outpoint) {
let peer_state_mutex = per_peer_state.get(peer_node_id).unwrap();
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
for action in actions.iter() {
if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
downstream_counterparty_and_funding_outpoint:
- Some((blocked_node_id, blocked_channel_outpoint, blocking_action)), ..
+ Some((blocked_node_id, _blocked_channel_outpoint, blocked_channel_id, blocking_action)), ..
} = action {
if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) {
log_trace!(logger,
"Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
- blocked_channel_outpoint.to_channel_id());
+ blocked_channel_id);
blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
- .entry(blocked_channel_outpoint.to_channel_id())
+ .entry(*blocked_channel_id)
.or_insert_with(Vec::new).push(blocking_action.clone());
} else {
// If the channel we were blocking has closed, we don't need to
channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
}
- for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding) in pending_claims_to_replay {
+ for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding, downstream_channel_id) in pending_claims_to_replay {
// We use `downstream_closed` in place of `from_onchain` here just as a guess - we
// don't remember in the `ChannelMonitor` where we got a preimage from, but if the
// channel is closed we just assume that it probably came from an on-chain claim.
- channel_manager.claim_funds_internal(source, preimage, Some(downstream_value),
- downstream_closed, true, downstream_node_id, downstream_funding);
+ channel_manager.claim_funds_internal(source, preimage, Some(downstream_value), None,
+ downstream_closed, true, downstream_node_id, downstream_funding, downstream_channel_id);
}
//TODO: Broadcast channel update for closed channels, but only after we've made a
use bitcoin::blockdata::locktime::absolute::LockTime;
use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
- use bitcoin::{Block, Transaction, TxOut};
+ use bitcoin::{Transaction, TxOut};
use crate::sync::{Arc, Mutex, RwLock};
let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
let scorer = RwLock::new(test_utils::TestScorer::new());
- let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &scorer);
+ let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &logger_a, &scorer);
let mut config: UserConfig = Default::default();
config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253);