let counterparty_node_id = shutdown_res.counterparty_node_id;
let channel_id = shutdown_res.channel_id;
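// The logger helpers used below are assumed to take a trailing
// `Option<PaymentHash>` in addition to their existing arguments, roughly:
//     WithContext::from(logger, Option<PublicKey>, Option<ChannelId>, Option<PaymentHash>)
//     WithChannelContext::from(logger, &ChannelContext<SP>, Option<PaymentHash>)
// Call sites pass `Some(hash)` when a specific HTLC is in scope and `None`
// otherwise, as in this shutdown path.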
let logger = WithContext::from(
- &$self.logger, Some(counterparty_node_id), Some(channel_id),
+ &$self.logger, Some(counterparty_node_id), Some(channel_id), None
);
log_error!(logger, "Force-closing channel: {}", err.err);
(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), *$channel_id))
},
ChannelError::Close(msg) => {
- let logger = WithChannelContext::from(&$self.logger, &$channel.context);
+ let logger = WithChannelContext::from(&$self.logger, &$channel.context, None);
log_error!(logger, "Closing channel {} due to close-required error: {}", $channel_id, msg);
update_maps_on_chan_removal!($self, $channel.context);
let reason = ClosureReason::ProcessingError { err: msg.clone() };
macro_rules! handle_monitor_update_completion {
($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
- let logger = WithChannelContext::from(&$self.logger, &$chan.context);
+ let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
let mut updates = $chan.monitor_updating_restored(&&logger,
&$self.node_signer, $self.chain_hash, &$self.default_configuration,
$self.best_block.read().unwrap().height);
macro_rules! handle_new_monitor_update {
($self: ident, $update_res: expr, $chan: expr, _internal, $completed: expr) => { {
debug_assert!($self.background_events_processed_since_startup.load(Ordering::Acquire));
- let logger = WithChannelContext::from(&$self.logger, &$chan.context);
+ let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
match $update_res {
ChannelMonitorUpdateStatus::UnrecoverableError => {
let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
}
let logger = WithContext::from(
- &self.logger, Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id),
+ &self.logger, Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id), None
);
log_debug!(logger, "Finishing closure of channel due to {} with {} HTLCs to fail",
} else {
ClosureReason::HolderForceClosed
};
- let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id));
+ let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id), None);
if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) {
log_error!(logger, "Force-closing channel {}", channel_id);
let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
}
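// An HTLC failure always concerns a specific payment, so the hash from the
// offending `update_add_htlc` message is attached to the log context here.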
log_info!(
- WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id)),
+ WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), Some(msg.payment_hash)),
"Failed to accept/forward incoming HTLC: {}", err_msg
);
// If `msg.blinding_point` is set, we must always fail with malformed.
macro_rules! return_err {
($msg: expr, $err_code: expr, $data: expr) => {
{
- let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id));
+ let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), Some(msg.payment_hash));
log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
if msg.blinding_point.is_some() {
return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
if chan.context.get_short_channel_id().is_none() {
return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError});
}
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_trace!(logger, "Attempting to generate broadcast channel update for channel {}", &chan.context.channel_id());
self.get_channel_update_for_unicast(chan)
}
/// [`channel_update`]: msgs::ChannelUpdate
/// [`internal_closing_signed`]: Self::internal_closing_signed
fn get_channel_update_for_unicast(&self, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_trace!(logger, "Attempting to generate channel update for channel {}", chan.context.channel_id());
let short_channel_id = match chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) {
None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
}
fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_trace!(logger, "Generating channel update for channel {}", chan.context.channel_id());
let were_node_one = self.our_network_pubkey.serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
&self.secp_ctx, &path, &session_priv, total_value, recipient_onion, cur_height,
payment_hash, keysend_preimage, prng_seed
).map_err(|e| {
- let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None);
+ let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None, Some(*payment_hash));
log_error!(logger, "Failed to build an onion for path for payment hash {}", payment_hash);
e
})?;
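// Once onion construction succeeds the payment hash is known for the rest of
// this send path, so it is attached to every log context below even where the
// channel id has not been resolved yet.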
let err: Result<(), _> = loop {
let (counterparty_node_id, id) = match self.short_to_chan_info.read().unwrap().get(&path.hops.first().unwrap().short_channel_id) {
None => {
- let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None);
+ let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None, Some(*payment_hash));
log_error!(logger, "Failed to find first-hop for payment hash {}", payment_hash);
return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()})
},
Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
};
- let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(id));
+ let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(id), Some(*payment_hash));
log_trace!(logger,
"Attempting to send payment with payment hash {} along path with next hop {}",
payment_hash, path.hops.first().unwrap().short_channel_id);
return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()});
}
let funding_txo = chan.context.get_funding_txo().unwrap();
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, Some(*payment_hash));
let send_res = chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(),
htlc_cltv, HTLCSource::OutboundRoute {
path: path.clone(),
},
}
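// Funding creation precedes any HTLC on the channel, so there is no payment
// hash to attach to this logger.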
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &&logger);
match funding_res {
Ok(funding_msg) => (chan, funding_msg),
None => {
let error = format!("Channel with id {} not found for the passed counterparty node_id {}",
next_hop_channel_id, next_node_id);
- let logger = WithContext::from(&self.logger, Some(next_node_id), Some(*next_hop_channel_id));
+ let logger = WithContext::from(&self.logger, Some(next_node_id), Some(*next_hop_channel_id), None);
log_error!(logger, "{} when attempting to forward intercepted HTLC", error);
return Err(APIError::ChannelUnavailable {
err: error
// Process the HTLC on the incoming channel.
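// The candidate HTLC's payment hash rides along in the log context while the
// channel decides whether it can accept the `update_add_htlc`.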
match self.do_funded_channel_callback(incoming_scid, |chan: &mut Channel<SP>| {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, Some(update_add_htlc.payment_hash));
chan.can_accept_incoming_htlc(
update_add_htlc, &self.fee_estimator, &logger,
)
}) => {
macro_rules! failure_handler {
($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
- let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_channel_id));
+ let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_channel_id), Some(payment_hash));
log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
for forward_info in pending_forwards.drain(..) {
let queue_fail_htlc_res = match forward_info {
HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
}, skimmed_fee_msat, ..
},
}) => {
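// Shadow the channel-scoped logger above with one that also carries this
// HTLC's payment hash for the duration of the forward.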
+ let logger = WithChannelContext::from(&self.logger, &chan.context, Some(payment_hash));
log_trace!(logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, &payment_hash, short_chan_id);
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
fn update_channel_fee(&self, chan_id: &ChannelId, chan: &mut Channel<SP>, new_feerate: u32) -> NotifyOption {
if !chan.context.is_outbound() { return NotifyOption::SkipPersistNoEvents; }
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
// If the feerate has decreased by less than half, don't bother
if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() {
| {
context.maybe_expire_prev_config();
if unfunded_context.should_expire_unfunded_channel() {
- let logger = WithChannelContext::from(&self.logger, context);
+ let logger = WithChannelContext::from(&self.logger, context, None);
log_error!(logger,
"Force-closing pending channel with ID {} for not establishing in a timely manner", chan_id);
update_maps_on_chan_removal!(self, &context);
chan.context.maybe_expire_prev_config();
if chan.should_disconnect_peer_awaiting_response() {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_debug!(logger, "Disconnecting peer {} due to not making any progress on channel {}",
counterparty_node_id, chan_id);
pending_msg_events.push(MessageSendEvent::HandleError {
for (chan_id, req) in peer_state.inbound_channel_request_by_id.iter_mut() {
if { req.ticks_remaining -= 1 ; req.ticks_remaining } <= 0 {
- let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(*chan_id));
+ let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(*chan_id), None);
log_error!(logger, "Force-closing unaccepted inbound channel {} for not accepting in a timely manner", &chan_id);
peer_state.pending_msg_events.push(
events::MessageSendEvent::HandleError {
ref phantom_shared_secret, outpoint: _, ref blinded_failure, ref channel_id, ..
}) => {
log_trace!(
- WithContext::from(&self.logger, None, Some(*channel_id)),
+ WithContext::from(&self.logger, None, Some(*channel_id), Some(*payment_hash)),
"Failing {}HTLC with payment_hash {} backwards from us: {:?}",
if blinded_failure.is_some() { "blinded " } else { "" }, &payment_hash, onion_error
);
if let msgs::ErrorAction::IgnoreError = err.err.action {
// We got a temporary failure updating monitor, but will claim the
// HTLC when the monitor updating is restored (or on chain).
- let logger = WithContext::from(&self.logger, None, Some(prev_hop_chan_id));
+ let logger = WithContext::from(&self.logger, None, Some(prev_hop_chan_id), Some(payment_hash));
log_error!(logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
} else { errs.push((pk, err)); }
}
if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(chan_id) {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
let counterparty_node_id = chan.context.get_counterparty_node_id();
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &&logger);
match fulfill_res {
// with a preimage we *must* somehow manage to propagate it to the upstream
// channel, or we must have an ability to receive the same event and try
// again on restart.
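// Only the preimage (not the hash) is directly in scope in this error path,
// so the log context is limited to the channel id.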
- log_error!(WithContext::from(&self.logger, None, Some(prev_hop.channel_id)),
+ log_error!(WithContext::from(&self.logger, None, Some(prev_hop.channel_id), None),
"Critical error: failed to update channel monitor with preimage {:?}: {:?}",
payment_preimage, update_res);
}
funding_broadcastable: Option<Transaction>,
channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
-> (Option<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)>, Option<(u64, Vec<msgs::UpdateAddHTLC>)>) {
- let logger = WithChannelContext::from(&self.logger, &channel.context);
+ let logger = WithChannelContext::from(&self.logger, &channel.context, None);
log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement",
&channel.context.channel_id(),
if raa.is_some() { "an" } else { "no" },
pending.retain(|upd| upd.update_id > highest_applied_update_id);
pending.len()
} else { 0 };
- let logger = WithChannelContext::from(&self.logger, &channel.context);
+ let logger = WithChannelContext::from(&self.logger, &channel.context, None);
log_trace!(logger, "ChannelMonitor updated to {}. Current highest is {}. {} pending in-flight updates.",
highest_applied_update_id, channel.context.get_latest_monitor_update_id(),
remaining_in_flight);
fn do_accept_inbound_channel(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> {
- let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*temporary_channel_id));
+ let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*temporary_channel_id), None);
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
let peers_without_funded_channels =
let (mut chan, funding_msg_opt, monitor) =
match peer_state.channel_by_id.remove(&msg.temporary_channel_id) {
Some(ChannelPhase::UnfundedInboundV1(inbound_chan)) => {
- let logger = WithChannelContext::from(&self.logger, &inbound_chan.context);
+ let logger = WithChannelContext::from(&self.logger, &inbound_chan.context, None);
match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &&logger) {
Ok(res) => res,
Err((inbound_chan, err)) => {
}
Ok(())
} else {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_error!(logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated");
fail_chan!("Duplicate funding outpoint");
}
let logger = WithContext::from(
&self.logger,
Some(chan.context.get_counterparty_node_id()),
- Some(chan.context.channel_id())
+ Some(chan.context.channel_id()),
+ None
);
let res =
chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger);
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
let announcement_sigs_opt = try_chan_phase_entry!(self, chan.channel_ready(&msg, &self.node_signer,
self.chain_hash, &self.default_configuration, &self.best_block.read().unwrap(), &&logger), chan_phase_entry);
if let Some(announcement_sigs) = announcement_sigs_opt {
match phase {
ChannelPhase::Funded(chan) => {
if !chan.received_shutdown() {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_info!(logger, "Received a shutdown message from our counterparty for channel {}{}.",
msg.channel_id,
if chan.sent_shutdown() { " after we initiated shutdown" } else { "" });
},
ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
let context = phase.context_mut();
- let logger = WithChannelContext::from(&self.logger, context);
+ let logger = WithChannelContext::from(&self.logger, context, None);
log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
let mut chan = remove_channel_phase!(self, chan_phase_entry);
finish_shutdown = Some(chan.context_mut().force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel));
};
if let Some(broadcast_tx) = tx {
let channel_id = chan_option.as_ref().map(|channel| channel.context().channel_id());
- log_info!(WithContext::from(&self.logger, Some(*counterparty_node_id), channel_id), "Broadcasting {}", log_tx!(broadcast_tx));
+ log_info!(WithContext::from(&self.logger, Some(*counterparty_node_id), channel_id, None), "Broadcasting {}", log_tx!(broadcast_tx));
self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);
}
if let Some(ChannelPhase::Funded(chan)) = chan_option {
),
Err(e) => PendingHTLCStatus::Fail(e)
};
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, Some(msg.payment_hash));
// If the update_add is completely bogus, the call will Err and we will close,
// but if we've sent a shutdown and they haven't acknowledged it yet, we just
// want to reject the new HTLC and fail it backwards instead of forwarding.
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
let res = try_chan_phase_entry!(self, chan.update_fulfill_htlc(&msg), chan_phase_entry);
if let HTLCSource::PreviousHopData(prev_hop) = &res.0 {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_trace!(logger,
"Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
msg.channel_id);
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
let funding_txo = chan.context.get_funding_txo();
let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &&logger), chan_phase_entry);
if let Some(monitor_update) = monitor_update_opt {
prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info });
},
hash_map::Entry::Occupied(_) => {
- let logger = WithContext::from(&self.logger, None, Some(prev_channel_id));
+ let logger = WithContext::from(&self.logger, None, Some(prev_channel_id), Some(forward_info.payment_hash));
log_info!(logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid);
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
let funding_txo_opt = chan.context.get_funding_txo();
let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt {
self.raa_monitor_updates_held(
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
try_chan_phase_entry!(self, chan.update_fee(&self.fee_estimator, &msg, &&logger), chan_phase_entry);
} else {
return try_chan_phase_entry!(self, Err(ChannelError::Close(
if were_node_one == msg_from_node_one {
return Ok(NotifyOption::SkipPersistNoEvents);
} else {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_debug!(logger, "Received channel_update {:?} for channel {}.", msg, chan_id);
let did_change = try_chan_phase_entry!(self, chan.channel_update(&msg), chan_phase_entry);
// If nothing changed after applying their update, we don't need to bother
msg.channel_id
)
})?;
- let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id));
+ let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), None);
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
for monitor_event in monitor_events.drain(..) {
match monitor_event {
MonitorEvent::HTLCEvent(htlc_update) => {
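// `HTLCUpdate` monitor events carry the payment hash of the HTLC they
// resolve, which also scopes the claim/fail logging below.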
- let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id));
+ let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id), Some(htlc_update.payment_hash));
if let Some(preimage) = htlc_update.payment_preimage {
log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
self.claim_funds_internal(htlc_update.source, preimage,
let counterparty_node_id = chan.context.get_counterparty_node_id();
let funding_txo = chan.context.get_funding_txo();
let (monitor_opt, holding_cell_failed_htlcs) =
- chan.maybe_free_holding_cell_htlcs(&self.fee_estimator, &&WithChannelContext::from(&self.logger, &chan.context));
+ chan.maybe_free_holding_cell_htlcs(&self.fee_estimator, &&WithChannelContext::from(&self.logger, &chan.context, None));
if !holding_cell_failed_htlcs.is_empty() {
failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id));
}
peer_state.channel_by_id.retain(|channel_id, phase| {
match phase {
ChannelPhase::Funded(chan) => {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
match chan.maybe_propose_closing_signed(&self.fee_estimator, &&logger) {
Ok((msg_opt, tx_opt, shutdown_result_opt)) => {
if let Some(msg) = msg_opt {
mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
let logger = WithContext::from(
- &self.logger, Some(counterparty_node_id), Some(channel_id),
+ &self.logger, Some(counterparty_node_id), Some(channel_id), None
);
loop {
let per_peer_state = self.per_peer_state.read().unwrap();
*best_block = BestBlock::new(header.prev_blockhash, new_height)
}
- self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context)));
+ self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None)));
}
}
let _persistence_guard =
PersistenceNotifierGuard::optionally_notify_skipping_background_events(
self, || -> NotifyOption { NotifyOption::DoPersist });
- self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context))
+ self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None))
.map(|(a, b)| (a, Vec::new(), b)));
let last_best_block_height = self.best_block.read().unwrap().height;
if height < last_best_block_height {
let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire);
- self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context)));
+ self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None)));
}
}
self, || -> NotifyOption { NotifyOption::DoPersist });
*self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
- self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context)));
+ self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None)));
macro_rules! max_time {
($timestamp: expr) => {
self.do_chain_event(None, |channel| {
if let Some(funding_txo) = channel.context.get_funding_txo() {
if funding_txo.txid == *txid {
- channel.funding_transaction_unconfirmed(&&WithChannelContext::from(&self.logger, &channel.context)).map(|()| (None, Vec::new(), None))
+ channel.funding_transaction_unconfirmed(&&WithChannelContext::from(&self.logger, &channel.context, None)).map(|()| (None, Vec::new(), None))
} else { Ok((None, Vec::new(), None)) }
} else { Ok((None, Vec::new(), None)) }
});
timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data),
HTLCDestination::NextHopChannel { node_id: Some(channel.context.get_counterparty_node_id()), channel_id: channel.context.channel_id() }));
}
- let logger = WithChannelContext::from(&self.logger, &channel.context);
+ let logger = WithChannelContext::from(&self.logger, &channel.context, None);
if let Some(channel_ready) = channel_ready_opt {
send_channel_ready!(self, pending_msg_events, channel, channel_ready);
if channel.context.is_usable() {
HTLCFailReason::from_failure_code(0x2000 | 2),
HTLCDestination::InvalidForward { requested_forward_scid }));
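// Intercept timeouts are logged with the pending forward's payment hash; the
// counterparty id is left as `None` in this context.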
let logger = WithContext::from(
- &self.logger, None, Some(htlc.prev_channel_id)
+ &self.logger, None, Some(htlc.prev_channel_id), Some(htlc.forward_info.payment_hash)
);
log_trace!(logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid);
false
let mut per_peer_state = self.per_peer_state.write().unwrap();
let remove_peer = {
log_debug!(
- WithContext::from(&self.logger, Some(*counterparty_node_id), None),
+ WithContext::from(&self.logger, Some(*counterparty_node_id), None, None),
"Marking channels with {} disconnected and generating channel_updates.",
log_pubkey!(counterparty_node_id)
);
peer_state.channel_by_id.retain(|_, phase| {
let context = match phase {
ChannelPhase::Funded(chan) => {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
if chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok() {
// We only retain funded channels that are not shutdown.
return true;
}
fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init, inbound: bool) -> Result<(), ()> {
- let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), None);
+ let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), None, None);
if !init_msg.features.supports_static_remote_key() {
log_debug!(logger, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id));
return Err(());
for (_, phase) in peer_state.channel_by_id.iter_mut() {
match phase {
ChannelPhase::Funded(chan) => {
- let logger = WithChannelContext::from(&self.logger, &chan.context);
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
node_id: chan.context.get_counterparty_node_id(),
msg: chan.get_channel_reestablish(&&logger),
let mut channel: Channel<SP> = Channel::read(reader, (
&args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
))?;
- let logger = WithChannelContext::from(&args.logger, &channel.context);
+ let logger = WithChannelContext::from(&args.logger, &channel.context, None);
let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
funding_txo_to_channel_id.insert(funding_txo, channel.context.channel_id());
funding_txo_set.insert(funding_txo.clone());
// claim update ChannelMonitor updates were persisted prior to persisting
// the ChannelMonitor update for the forward leg, so attempting to fail the
// backwards leg of the HTLC will simply be rejected.
+ let logger = WithChannelContext::from(&args.logger, &channel.context, Some(*payment_hash));
log_info!(logger,
"Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager",
&payment_hash, &channel.context.channel_id());
let peer_state = &mut *peer_state_lock;
for phase in peer_state.channel_by_id.values() {
if let ChannelPhase::Funded(chan) = phase {
- let logger = WithChannelContext::from(&args.logger, &chan.context);
+ let logger = WithChannelContext::from(&args.logger, &chan.context, None);
// Channels that were persisted have to be funded, otherwise they should have been
// discarded.
if let Some(in_flight_upds) = in_flight_monitor_updates {
for ((counterparty_id, funding_txo), mut chan_in_flight_updates) in in_flight_upds {
let channel_id = funding_txo_to_channel_id.get(&funding_txo).copied();
- let logger = WithContext::from(&args.logger, Some(counterparty_id), channel_id);
+ let logger = WithContext::from(&args.logger, Some(counterparty_id), channel_id, None);
if let Some(monitor) = args.channel_monitors.get(&funding_txo) {
// Now that we've removed all the in-flight monitor updates for channels that are
// still open, we need to replay any monitor updates that are for closed channels,
let peer_state = &mut *peer_state_lock;
for (chan_id, phase) in peer_state.channel_by_id.iter_mut() {
if let ChannelPhase::Funded(chan) = phase {
- let logger = WithChannelContext::from(&args.logger, &chan.context);
+ let logger = WithChannelContext::from(&args.logger, &chan.context, None);
if chan.context.outbound_scid_alias() == 0 {
let mut outbound_scid_alias;
loop {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
if let Some(ChannelPhase::Funded(channel)) = peer_state.channel_by_id.get_mut(&previous_channel_id) {
- let logger = WithChannelContext::from(&args.logger, &channel.context);
+ let logger = WithChannelContext::from(&args.logger, &channel.context, Some(payment_hash));
channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &&logger);
}
}
for (node_id, monitor_update_blocked_actions) in monitor_update_blocked_actions_per_peer.unwrap() {
if let Some(peer_state) = per_peer_state.get(&node_id) {
for (channel_id, actions) in monitor_update_blocked_actions.iter() {
- let logger = WithContext::from(&args.logger, Some(node_id), Some(*channel_id));
+ let logger = WithContext::from(&args.logger, Some(node_id), Some(*channel_id), None);
for action in actions.iter() {
if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
downstream_counterparty_and_funding_outpoint:
}
peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions;
} else {
- log_error!(WithContext::from(&args.logger, Some(node_id), None), "Got blocked actions without a per-peer-state for {}", node_id);
+ log_error!(WithContext::from(&args.logger, Some(node_id), None, None), "Got blocked actions without a per-peer-state for {}", node_id);
return Err(DecodeError::InvalidValue);
}
}
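// Everything below lives in the peer handler, beneath the payment layer, so
// its log contexts carry peer (and sometimes channel) ids but never a
// payment hash.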
/// Append a message to a peer's pending outbound/write buffer
fn enqueue_message<M: wire::Type>(&self, peer: &mut Peer, message: &M) {
- let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None);
+ let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None, None);
if is_gossip_msg(message.type_id()) {
log_gossip!(logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap().0));
} else {
macro_rules! try_potential_handleerror {
($peer: expr, $thing: expr) => {{
let res = $thing;
- let logger = WithContext::from(&self.logger, peer_node_id.map(|(id, _)| id), None);
+ let logger = WithContext::from(&self.logger, peer_node_id.map(|(id, _)| id), None, None);
match res {
Ok(x) => x,
Err(e) => {
macro_rules! insert_node_id {
() => {
- let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None);
+ let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None, None);
match self.node_id_to_descriptor.lock().unwrap().entry(peer.their_node_id.unwrap().0) {
hash_map::Entry::Occupied(e) => {
log_trace!(logger, "Got second connection with {}, closing", log_pubkey!(peer.their_node_id.unwrap().0));
peer.pending_read_buffer.resize(18, 0);
peer.pending_read_is_header = true;
- let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None);
+ let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None, None);
let message = match message_result {
Ok(x) => x,
Err(e) => {
message: wire::Message<<<CMH as Deref>::Target as wire::CustomMessageReader>::CustomMessage>
) -> Result<Option<wire::Message<<<CMH as Deref>::Target as wire::CustomMessageReader>::CustomMessage>>, MessageHandlingError> {
let their_node_id = peer_lock.their_node_id.clone().expect("We know the peer's public key by the time we receive messages").0;
- let logger = WithContext::from(&self.logger, Some(their_node_id), None);
+ let logger = WithContext::from(&self.logger, Some(their_node_id), None, None);
let message = match self.do_handle_message_holding_peer_lock(peer_lock, message, &their_node_id, &logger)? {
Some(processed_message) => processed_message,
}
debug_assert!(peer.their_node_id.is_some());
debug_assert!(peer.channel_encryptor.is_ready_for_encryption());
- let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None);
+ let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None, None);
if peer.buffer_full_drop_gossip_broadcast() {
log_gossip!(logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
continue;
}
debug_assert!(peer.their_node_id.is_some());
debug_assert!(peer.channel_encryptor.is_ready_for_encryption());
- let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None);
+ let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None, None);
if peer.buffer_full_drop_gossip_broadcast() {
log_gossip!(logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
continue;
}
debug_assert!(peer.their_node_id.is_some());
debug_assert!(peer.channel_encryptor.is_ready_for_encryption());
- let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None);
+ let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None, None);
if peer.buffer_full_drop_gossip_broadcast() {
log_gossip!(logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
continue;
for event in events_generated.drain(..) {
match event {
MessageSendEvent::SendAcceptChannel { ref node_id, ref msg } => {
- log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.common_fields.temporary_channel_id)), "Handling SendAcceptChannel event in peer_handler for node {} for channel {}",
+ log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.common_fields.temporary_channel_id), None), "Handling SendAcceptChannel event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
&msg.common_fields.temporary_channel_id);
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendAcceptChannelV2 { ref node_id, ref msg } => {
- log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.common_fields.temporary_channel_id)), "Handling SendAcceptChannelV2 event in peer_handler for node {} for channel {}",
+ log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.common_fields.temporary_channel_id), None), "Handling SendAcceptChannelV2 event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
&msg.common_fields.temporary_channel_id);
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendOpenChannel { ref node_id, ref msg } => {
- log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.common_fields.temporary_channel_id)), "Handling SendOpenChannel event in peer_handler for node {} for channel {}",
+ log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.common_fields.temporary_channel_id), None), "Handling SendOpenChannel event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
&msg.common_fields.temporary_channel_id);
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendOpenChannelV2 { ref node_id, ref msg } => {
- log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.common_fields.temporary_channel_id)), "Handling SendOpenChannelV2 event in peer_handler for node {} for channel {}",
+ log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.common_fields.temporary_channel_id), None), "Handling SendOpenChannelV2 event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
&msg.common_fields.temporary_channel_id);
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendFundingCreated { ref node_id, ref msg } => {
- log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.temporary_channel_id)), "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})",
+ log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.temporary_channel_id), None), "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})",
log_pubkey!(node_id),
&msg.temporary_channel_id,
ChannelId::v1_from_funding_txid(msg.funding_txid.as_byte_array(), msg.funding_output_index));
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendFundingSigned { ref node_id, ref msg } => {
- log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendFundingSigned event in peer_handler for node {} for channel {}",
+ log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendFundingSigned event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
&msg.channel_id);
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendChannelReady { ref node_id, ref msg } => {
- log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendChannelReady event in peer_handler for node {} for channel {}",
+ log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendChannelReady event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
&msg.channel_id);
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendStfu { ref node_id, ref msg} => {
- let logger = WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id));
+ let logger = WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None);
log_debug!(logger, "Handling SendStfu event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
&msg.channel_id);
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
}
MessageSendEvent::SendSplice { ref node_id, ref msg} => {
- let logger = WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id));
+ let logger = WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None);
log_debug!(logger, "Handling SendSplice event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
&msg.channel_id);
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
}
MessageSendEvent::SendSpliceAck { ref node_id, ref msg} => {
- let logger = WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id));
+ let logger = WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None);
log_debug!(logger, "Handling SendSpliceAck event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
&msg.channel_id);
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
}
MessageSendEvent::SendSpliceLocked { ref node_id, ref msg} => {
- let logger = WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id));
+ let logger = WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None);
log_debug!(logger, "Handling SendSpliceLocked event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
&msg.channel_id);
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
}
MessageSendEvent::SendTxAddInput { ref node_id, ref msg } => {
- log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendTxAddInput event in peer_handler for node {} for channel {}",
+ log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxAddInput event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
&msg.channel_id);
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendTxAddOutput { ref node_id, ref msg } => {
- log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendTxAddOutput event in peer_handler for node {} for channel {}",
+ log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxAddOutput event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
&msg.channel_id);
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendTxRemoveInput { ref node_id, ref msg } => {
- log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendTxRemoveInput event in peer_handler for node {} for channel {}",
+ log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxRemoveInput event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
&msg.channel_id);
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendTxRemoveOutput { ref node_id, ref msg } => {
- log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendTxRemoveOutput event in peer_handler for node {} for channel {}",
+ log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxRemoveOutput event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
&msg.channel_id);
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendTxComplete { ref node_id, ref msg } => {
- log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendTxComplete event in peer_handler for node {} for channel {}",
+ log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxComplete event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
&msg.channel_id);
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendTxSignatures { ref node_id, ref msg } => {
- log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendTxSignatures event in peer_handler for node {} for channel {}",
+ log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxSignatures event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
&msg.channel_id);
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendTxInitRbf { ref node_id, ref msg } => {
- log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendTxInitRbf event in peer_handler for node {} for channel {}",
+ log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxInitRbf event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
&msg.channel_id);
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendTxAckRbf { ref node_id, ref msg } => {
- log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendTxAckRbf event in peer_handler for node {} for channel {}",
+ log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxAckRbf event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
&msg.channel_id);
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendTxAbort { ref node_id, ref msg } => {
- log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendTxAbort event in peer_handler for node {} for channel {}",
+ log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxAbort event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
&msg.channel_id);
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
- log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendAnnouncementSignatures event in peer_handler for node {} for channel {})",
+ log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendAnnouncementSignatures event in peer_handler for node {} for channel {})",
log_pubkey!(node_id),
&msg.channel_id);
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
- log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(commitment_signed.channel_id)), "Handling UpdateHTLCs event in peer_handler for node {} with {} adds, {} fulfills, {} fails for channel {}",
+ log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(commitment_signed.channel_id), None), "Handling UpdateHTLCs event in peer_handler for node {} with {} adds, {} fulfills, {} fails for channel {}",
log_pubkey!(node_id),
update_add_htlcs.len(),
update_fulfill_htlcs.len(),
self.enqueue_message(&mut *peer, commitment_signed);
},
MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
- log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendRevokeAndACK event in peer_handler for node {} for channel {}",
+ log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendRevokeAndACK event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
&msg.channel_id);
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
- log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendClosingSigned event in peer_handler for node {} for channel {}",
+ log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendClosingSigned event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
&msg.channel_id);
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendShutdown { ref node_id, ref msg } => {
- log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling Shutdown event in peer_handler for node {} for channel {}",
+ log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling Shutdown event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
&msg.channel_id);
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
- log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id)), "Handling SendChannelReestablish event in peer_handler for node {} for channel {}",
+ log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendChannelReestablish event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
&msg.channel_id);
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendChannelAnnouncement { ref node_id, ref msg, ref update_msg } => {
- log_debug!(WithContext::from(&self.logger, Some(*node_id), None), "Handling SendChannelAnnouncement event in peer_handler for node {} for short channel id {}",
+ log_debug!(WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendChannelAnnouncement event in peer_handler for node {} for short channel id {}",
log_pubkey!(node_id),
msg.contents.short_channel_id);
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
}
},
MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => {
- log_trace!(WithContext::from(&self.logger, Some(*node_id), None), "Handling SendChannelUpdate event in peer_handler for node {} for channel {}",
+ log_trace!(WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendChannelUpdate event in peer_handler for node {} for channel {}",
log_pubkey!(node_id), msg.contents.short_channel_id);
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::HandleError { node_id, action } => {
- let logger = WithContext::from(&self.logger, Some(node_id), None);
+ let logger = WithContext::from(&self.logger, Some(node_id), None, None);
match action {
msgs::ErrorAction::DisconnectPeer { msg } => {
if let Some(msg) = msg.as_ref() {
self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
}
MessageSendEvent::SendReplyChannelRange { ref node_id, ref msg } => {
- log_gossip!(WithContext::from(&self.logger, Some(*node_id), None), "Handling SendReplyChannelRange event in peer_handler for node {} with num_scids={} first_blocknum={} number_of_blocks={}, sync_complete={}",
+ log_gossip!(WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendReplyChannelRange event in peer_handler for node {} with num_scids={} first_blocknum={} number_of_blocks={}, sync_complete={}",
log_pubkey!(node_id),
msg.short_channel_ids.len(),
msg.first_blocknum,
debug_assert!(peer.their_node_id.is_some());
if let Some((node_id, _)) = peer.their_node_id {
- log_trace!(WithContext::from(&self.logger, Some(node_id), None), "Disconnecting peer with id {} due to {}", node_id, reason);
+ log_trace!(WithContext::from(&self.logger, Some(node_id), None, None), "Disconnecting peer with id {} due to {}", node_id, reason);
self.message_handler.chan_handler.peer_disconnected(&node_id);
self.message_handler.onion_message_handler.peer_disconnected(&node_id);
}
Some(peer_lock) => {
let peer = peer_lock.lock().unwrap();
if let Some((node_id, _)) = peer.their_node_id {
- log_trace!(WithContext::from(&self.logger, Some(node_id), None), "Handling disconnection of peer {}", log_pubkey!(node_id));
+ log_trace!(WithContext::from(&self.logger, Some(node_id), None, None), "Handling disconnection of peer {}", log_pubkey!(node_id));
let removed = self.node_id_to_descriptor.lock().unwrap().remove(&node_id);
debug_assert!(removed.is_some(), "descriptor maps should be consistent");
if !peer.handshake_complete() { return; }