// Last, track onchain revoked commitment transaction and fail backward outgoing HTLCs as payment path is broken
if !claimable_outpoints.is_empty() || per_commitment_option.is_some() { // ie we're confident this is actually ours
// We're definitely a counterparty commitment transaction!
- log_trace!(logger, "Got broadcast of revoked counterparty commitment transaction, going to generate general spend tx with {} inputs", claimable_outpoints.len());
+ log_error!(logger, "Got broadcast of revoked counterparty commitment transaction, going to generate general spend tx with {} inputs", claimable_outpoints.len());
for (idx, outp) in tx.output.iter().enumerate() {
watch_outputs.push((idx as u32, outp.clone()));
}
}
self.counterparty_commitment_txn_on_chain.insert(commitment_txid, commitment_number);
- log_trace!(logger, "Got broadcast of non-revoked counterparty commitment transaction {}", commitment_txid);
+ log_info!(logger, "Got broadcast of non-revoked counterparty commitment transaction {}", commitment_txid);
macro_rules! check_htlc_fails {
($txid: expr, $commitment_tx: expr, $id: tt) => {
let per_commitment_key = ignore_error!(SecretKey::from_slice(&secret));
let per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &per_commitment_key);
- log_trace!(logger, "Counterparty HTLC broadcast {}:{}", htlc_txid, 0);
+ log_error!(logger, "Got broadcast of revoked counterparty HTLC transaction, spending {}:{}", htlc_txid, 0);
let revk_outp = RevokedOutput::build(per_commitment_point, self.counterparty_tx_cache.counterparty_delayed_payment_base_key, self.counterparty_tx_cache.counterparty_htlc_base_key, per_commitment_key, tx.output[0].value, self.counterparty_tx_cache.on_counterparty_tx_csv);
let justice_package = PackageTemplate::build_package(htlc_txid, 0, PackageSolvingData::RevokedOutput(revk_outp), height + self.counterparty_tx_cache.on_counterparty_tx_csv as u32, true, height);
let claimable_outpoints = vec!(justice_package);
if self.current_holder_commitment_tx.txid == commitment_txid {
is_holder_tx = true;
- log_trace!(logger, "Got latest holder commitment tx broadcast, searching for available HTLCs to claim");
+ log_info!(logger, "Got broadcast of latest holder commitment tx {}, searching for available HTLCs to claim", commitment_txid);
let res = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, height);
let mut to_watch = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, tx);
append_onchain_update!(res, to_watch);
} else if let &Some(ref holder_tx) = &self.prev_holder_signed_commitment_tx {
if holder_tx.txid == commitment_txid {
is_holder_tx = true;
- log_trace!(logger, "Got previous holder commitment tx broadcast, searching for available HTLCs to claim");
+ log_info!(logger, "Got broadcast of previous holder commitment tx {}, searching for available HTLCs to claim", commitment_txid);
let res = self.get_broadcasted_holder_claims(holder_tx, height);
let mut to_watch = self.get_broadcasted_holder_watch_outputs(holder_tx, tx);
append_onchain_update!(res, to_watch);
}
pub fn get_latest_holder_commitment_txn<L: Deref>(&mut self, logger: &L) -> Vec<Transaction> where L::Target: Logger {
- log_trace!(logger, "Getting signed latest holder commitment transaction!");
+ log_debug!(logger, "Getting signed latest holder commitment transaction!");
self.holder_tx_signed = true;
let commitment_tx = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript);
let txid = commitment_tx.txid();
#[cfg(any(test,feature = "unsafe_revoked_tx_signing"))]
/// Note that this includes possibly-locktimed-in-the-future transactions!
fn unsafe_get_latest_holder_commitment_txn<L: Deref>(&mut self, logger: &L) -> Vec<Transaction> where L::Target: Logger {
- log_trace!(logger, "Getting signed copy of latest holder commitment transaction!");
+ log_debug!(logger, "Getting signed copy of latest holder commitment transaction!");
let commitment_tx = self.onchain_tx_handler.get_fully_signed_copy_holder_tx(&self.funding_redeemscript);
let txid = commitment_tx.txid();
let mut holder_transactions = vec![commitment_tx];
matured_htlcs.push(source.clone());
}
- log_trace!(logger, "HTLC {} failure update has got enough confirmations to be passed upstream", log_bytes!(payment_hash.0));
+ log_debug!(logger, "HTLC {} failure update has got enough confirmations to be passed upstream", log_bytes!(payment_hash.0));
self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate {
payment_hash: payment_hash,
payment_preimage: None,
}));
},
OnchainEvent::MaturingOutput { descriptor } => {
- log_trace!(logger, "Descriptor {} has got enough confirmations to be passed upstream", log_spendable!(descriptor));
+ log_debug!(logger, "Descriptor {} has got enough confirmations to be passed upstream", log_spendable!(descriptor));
self.pending_events.push(Event::SpendableOutputs {
outputs: vec![descriptor]
});
height,
event: OnchainEvent::HTLCUpdate { source: source, payment_hash: payment_hash },
};
- log_info!(logger, "Failing HTLC with payment_hash {} timeout by a spend tx, waiting for confirmation (at height{})", log_bytes!(payment_hash.0), entry.confirmation_threshold());
+ log_info!(logger, "Failing HTLC with payment_hash {} timeout by a spend tx, waiting for confirmation (at height {})", log_bytes!(payment_hash.0), entry.confirmation_threshold());
self.onchain_events_awaiting_threshold_conf.push(entry);
}
}
height: height,
event: OnchainEvent::MaturingOutput { descriptor: spendable_output.clone() },
};
- log_trace!(logger, "Maturing {} until {}", log_spendable!(spendable_output), entry.confirmation_threshold());
+ log_info!(logger, "Received spendable output {}, spendable at height {}", log_spendable!(spendable_output), entry.confirmation_threshold());
self.onchain_events_awaiting_threshold_conf.push(entry);
}
}
F::Target: FeeEstimator,
L::Target: Logger,
{
- log_trace!(logger, "Updating claims view at height {} with {} matched transactions and {} claim requests", height, txn_matched.len(), requests.len());
+ log_debug!(logger, "Updating claims view at height {} with {} matched transactions and {} claim requests", height, txn_matched.len(), requests.len());
let mut preprocessed_requests = Vec::with_capacity(requests.len());
let mut aggregated_request = None;
for req in requests {
// Don't claim an outpoint twice; that would be bad for privacy and may uselessly lock a CPFP input for a while
if let Some(_) = self.claimable_outpoints.get(req.outpoints()[0]) {
- log_trace!(logger, "Ignoring second claim for outpoint {}:{}, already registered its claiming request", req.outpoints()[0].txid, req.outpoints()[0].vout);
+ log_info!(logger, "Ignoring second claim for outpoint {}:{}, already registered its claiming request", req.outpoints()[0].txid, req.outpoints()[0].vout);
} else {
let timelocked_equivalent_package = self.locktimed_packages.iter().map(|v| v.1.iter()).flatten()
.find(|locked_package| locked_package.outpoints() == req.outpoints());
if let Some(package) = timelocked_equivalent_package {
- log_trace!(logger, "Ignoring second claim for outpoint {}:{}, we already have one which we're waiting on a timelock at {} for.",
+ log_info!(logger, "Ignoring second claim for outpoint {}:{}, we already have one which we're waiting on a timelock at {} for.",
req.outpoints()[0].txid, req.outpoints()[0].vout, package.package_timelock());
continue;
}
if req.package_timelock() > height + 1 {
- log_debug!(logger, "Delaying claim of package until its timelock at {} (current height {}), the following outpoints are spent:", req.package_timelock(), height);
+ log_info!(logger, "Delaying claim of package until its timelock at {} (current height {}), the following outpoints are spent:", req.package_timelock(), height);
for outpoint in req.outpoints() {
- log_debug!(logger, " Outpoint {}", outpoint);
+ log_info!(logger, " Outpoint {}", outpoint);
}
self.locktimed_packages.entry(req.package_timelock()).or_insert(Vec::new()).push(req);
continue;
req.set_feerate(new_feerate);
let txid = tx.txid();
for k in req.outpoints() {
- log_trace!(logger, "Registering claiming request for {}:{}", k.txid, k.vout);
+ log_info!(logger, "Registering claiming request for {}:{}", k.txid, k.vout);
self.claimable_outpoints.insert(k.clone(), (txid, height));
}
self.pending_claim_requests.insert(txid, req);
- log_trace!(logger, "Broadcasting onchain {}", log_tx!(tx));
+ log_info!(logger, "Broadcasting onchain {}", log_tx!(tx));
broadcaster.broadcast_transaction(&tx);
}
}
log_trace!(logger, "Bumping {} candidates", bump_candidates.len());
for (first_claim_txid, request) in bump_candidates.iter() {
if let Some((new_timer, new_feerate, bump_tx)) = self.generate_claim_tx(height, &request, &*fee_estimator, &*logger) {
- log_trace!(logger, "Broadcasting onchain {}", log_tx!(bump_tx));
+ log_info!(logger, "Broadcasting RBF-bumped onchain {}", log_tx!(bump_tx));
broadcaster.broadcast_transaction(&bump_tx);
if let Some(request) = self.pending_claim_requests.get_mut(first_claim_txid) {
request.set_timer(new_timer);
});
}
for (i, (outpoint, out)) in self.inputs.iter().enumerate() {
- log_trace!(logger, "Adding claiming input for outpoint {}:{}", outpoint.txid, outpoint.vout);
+ log_debug!(logger, "Adding claiming input for outpoint {}:{}", outpoint.txid, outpoint.vout);
if !out.finalize_input(&mut bumped_tx, i, onchain_handler) { return None; }
}
- log_trace!(logger, "Finalized transaction {} ready to broadcast", bumped_tx.txid());
+ log_debug!(logger, "Finalized transaction {} ready to broadcast", bumped_tx.txid());
return Some(bumped_tx);
},
PackageMalleability::Untractable => {
debug_assert_eq!(value, 0, "value is ignored for non-malleable packages, should be zero to ensure callsites are correct");
if let Some((outpoint, outp)) = self.inputs.first() {
if let Some(final_tx) = outp.get_finalized_tx(outpoint, onchain_handler) {
- log_trace!(logger, "Adding claiming input for outpoint {}:{}", outpoint.txid, outpoint.vout);
- log_trace!(logger, "Finalized transaction {} ready to broadcast", final_tx.txid());
+ log_debug!(logger, "Adding claiming input for outpoint {}:{}", outpoint.txid, outpoint.vout);
+ log_debug!(logger, "Finalized transaction {} ready to broadcast", final_tx.txid());
return Some(final_tx);
}
return None;
// ...else just increase the previous feerate by 25% (because that's a nice number)
let new_fee = previous_feerate * (predicted_weight as u64) / 750;
if input_amounts <= new_fee {
- log_trace!(logger, "Can't 25% bump new claiming tx, amount {} is too small", input_amounts);
+ log_warn!(logger, "Can't 25% bump new claiming tx, amount {} is too small", input_amounts);
return None;
}
new_fee
}
} else {
- log_trace!(logger, "Can't new-estimation bump new claiming tx, amount {} is too small", input_amounts);
+ log_warn!(logger, "Can't new-estimation bump new claiming tx, amount {} is too small", input_amounts);
return None;
};
}
}
if let Some((msg, commitment_signed)) = msgs {
- log_debug!(self.logger, "Caliming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
+ log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id()));
channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
node_id: chan.get().get_counterparty_node_id(),