honggfuzz = { version = "0.5", optional = true }
libfuzzer-sys = { git = "https://github.com/rust-fuzz/libfuzzer-sys.git", optional = true }
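# Both fuzzing engines above are optional; the fuzz targets enable one of them via feature flags.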
-[patch.crates-io]
-# Rust-Secp256k1 PR 282. This patch should be dropped once that is merged.
-secp256k1 = { git = 'https://github.com/TheBlueMatt/rust-secp256k1', rev = 'c8615128097e0205dcf69e515e56fb57e5c97138' }
-# bitcoin_hashes PR 111 (without the top commit). This patch should be dropped once that is merged.
-bitcoin_hashes = { git = 'https://github.com/TheBlueMatt/bitcoin_hashes', rev = 'c90d26339a3e34fd2f942aa80298f410cc41b743' }
-
[build-dependencies]
cc = "1.0"
impl<Out: Output> Logger for TestLogger<Out> {
fn log(&self, record: &Record) {
write!(LockedWriteAdapter(&self.out),
- "{:<5} {} [{} : {}, {}] {}\n", record.level.to_string(), self.id, record.module_path, record.file, record.line, record.args)
+ "{:<5} {} [{} : {}] {}\n", record.level.to_string(), self.id, record.module_path, record.line, record.args)
.unwrap();
}
}
// Lastly, track the on-chain revoked commitment transaction and fail backward any outgoing HTLCs, as the payment path is broken
if !claimable_outpoints.is_empty() || per_commitment_option.is_some() { // ie we're confident this is actually ours
// We're definitely a counterparty commitment transaction!
- log_trace!(logger, "Got broadcast of revoked counterparty commitment transaction, going to generate general spend tx with {} inputs", claimable_outpoints.len());
+ log_error!(logger, "Got broadcast of revoked counterparty commitment transaction, going to generate general spend tx with {} inputs", claimable_outpoints.len());
for (idx, outp) in tx.output.iter().enumerate() {
watch_outputs.push((idx as u32, outp.clone()));
}
}
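// Remember this counterparty commitment transaction by its commitment number so that
// HTLC transactions spending it can be recognized (and claimed, if revoked) later.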
self.counterparty_commitment_txn_on_chain.insert(commitment_txid, commitment_number);
- log_trace!(logger, "Got broadcast of non-revoked counterparty commitment transaction {}", commitment_txid);
+ log_info!(logger, "Got broadcast of non-revoked counterparty commitment transaction {}", commitment_txid);
macro_rules! check_htlc_fails {
($txid: expr, $commitment_tx: expr, $id: tt) => {
let per_commitment_key = ignore_error!(SecretKey::from_slice(&secret));
let per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &per_commitment_key);
- log_trace!(logger, "Counterparty HTLC broadcast {}:{}", htlc_txid, 0);
+ log_error!(logger, "Got broadcast of revoked counterparty HTLC transaction, spending {}:{}", htlc_txid, 0);
let revk_outp = RevokedOutput::build(per_commitment_point, self.counterparty_tx_cache.counterparty_delayed_payment_base_key, self.counterparty_tx_cache.counterparty_htlc_base_key, per_commitment_key, tx.output[0].value, self.counterparty_tx_cache.on_counterparty_tx_csv);
let justice_package = PackageTemplate::build_package(htlc_txid, 0, PackageSolvingData::RevokedOutput(revk_outp), height + self.counterparty_tx_cache.on_counterparty_tx_csv as u32, true, height);
let claimable_outpoints = vec!(justice_package);
if self.current_holder_commitment_tx.txid == commitment_txid {
is_holder_tx = true;
- log_trace!(logger, "Got latest holder commitment tx broadcast, searching for available HTLCs to claim");
+ log_info!(logger, "Got broadcast of latest holder commitment tx {}, searching for available HTLCs to claim", commitment_txid);
let res = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, height);
let mut to_watch = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, tx);
append_onchain_update!(res, to_watch);
} else if let &Some(ref holder_tx) = &self.prev_holder_signed_commitment_tx {
if holder_tx.txid == commitment_txid {
is_holder_tx = true;
- log_trace!(logger, "Got previous holder commitment tx broadcast, searching for available HTLCs to claim");
+ log_info!(logger, "Got broadcast of previous holder commitment tx {}, searching for available HTLCs to claim", commitment_txid);
let res = self.get_broadcasted_holder_claims(holder_tx, height);
let mut to_watch = self.get_broadcasted_holder_watch_outputs(holder_tx, tx);
append_onchain_update!(res, to_watch);
}
pub fn get_latest_holder_commitment_txn<L: Deref>(&mut self, logger: &L) -> Vec<Transaction> where L::Target: Logger {
- log_trace!(logger, "Getting signed latest holder commitment transaction!");
+ log_debug!(logger, "Getting signed latest holder commitment transaction!");
self.holder_tx_signed = true;
let commitment_tx = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript);
let txid = commitment_tx.txid();
#[cfg(any(test,feature = "unsafe_revoked_tx_signing"))]
/// Note that this includes possibly-locktimed-in-the-future transactions!
fn unsafe_get_latest_holder_commitment_txn<L: Deref>(&mut self, logger: &L) -> Vec<Transaction> where L::Target: Logger {
- log_trace!(logger, "Getting signed copy of latest holder commitment transaction!");
+ log_debug!(logger, "Getting signed copy of latest holder commitment transaction!");
let commitment_tx = self.onchain_tx_handler.get_fully_signed_copy_holder_tx(&self.funding_redeemscript);
let txid = commitment_tx.txid();
let mut holder_transactions = vec![commitment_tx];
matured_htlcs.push(source.clone());
}
- log_trace!(logger, "HTLC {} failure update has got enough confirmations to be passed upstream", log_bytes!(payment_hash.0));
+ log_debug!(logger, "HTLC {} failure update has got enough confirmations to be passed upstream", log_bytes!(payment_hash.0));
self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate {
payment_hash: payment_hash,
payment_preimage: None,
}));
},
OnchainEvent::MaturingOutput { descriptor } => {
- log_trace!(logger, "Descriptor {} has got enough confirmations to be passed upstream", log_spendable!(descriptor));
+ log_debug!(logger, "Descriptor {} has got enough confirmations to be passed upstream", log_spendable!(descriptor));
self.pending_events.push(Event::SpendableOutputs {
outputs: vec![descriptor]
});
height,
event: OnchainEvent::HTLCUpdate { source: source, payment_hash: payment_hash },
};
- log_info!(logger, "Failing HTLC with payment_hash {} timeout by a spend tx, waiting for confirmation (at height{})", log_bytes!(payment_hash.0), entry.confirmation_threshold());
+ log_info!(logger, "Failing HTLC with payment_hash {} timeout by a spend tx, waiting for confirmation (at height {})", log_bytes!(payment_hash.0), entry.confirmation_threshold());
self.onchain_events_awaiting_threshold_conf.push(entry);
}
}
height: height,
event: OnchainEvent::MaturingOutput { descriptor: spendable_output.clone() },
};
- log_trace!(logger, "Maturing {} until {}", log_spendable!(spendable_output), entry.confirmation_threshold());
+ log_info!(logger, "Received spendable output {}, spendable at height {}", log_spendable!(spendable_output), entry.confirmation_threshold());
self.onchain_events_awaiting_threshold_conf.push(entry);
}
}
F::Target: FeeEstimator,
L::Target: Logger,
{
- log_trace!(logger, "Updating claims view at height {} with {} matched transactions and {} claim requests", height, txn_matched.len(), requests.len());
+ log_debug!(logger, "Updating claims view at height {} with {} matched transactions and {} claim requests", height, txn_matched.len(), requests.len());
let mut preprocessed_requests = Vec::with_capacity(requests.len());
let mut aggregated_request = None;
for req in requests {
// Don't claim an outpoint twice: that would be bad for privacy and may uselessly lock a CPFP input for a while
if let Some(_) = self.claimable_outpoints.get(req.outpoints()[0]) {
- log_trace!(logger, "Ignoring second claim for outpoint {}:{}, already registered its claiming request", req.outpoints()[0].txid, req.outpoints()[0].vout);
+ log_info!(logger, "Ignoring second claim for outpoint {}:{}, already registered its claiming request", req.outpoints()[0].txid, req.outpoints()[0].vout);
} else {
let timelocked_equivalent_package = self.locktimed_packages.iter().map(|v| v.1.iter()).flatten()
.find(|locked_package| locked_package.outpoints() == req.outpoints());
if let Some(package) = timelocked_equivalent_package {
- log_trace!(logger, "Ignoring second claim for outpoint {}:{}, we already have one which we're waiting on a timelock at {} for.",
+ log_info!(logger, "Ignoring second claim for outpoint {}:{}, we already have one which we're waiting on a timelock at {} for.",
req.outpoints()[0].txid, req.outpoints()[0].vout, package.package_timelock());
continue;
}
if req.package_timelock() > height + 1 {
- log_debug!(logger, "Delaying claim of package until its timelock at {} (current height {}), the following outpoints are spent:", req.package_timelock(), height);
+ log_info!(logger, "Delaying claim of package until its timelock at {} (current height {}), the following outpoints are spent:", req.package_timelock(), height);
for outpoint in req.outpoints() {
- log_debug!(logger, " Outpoint {}", outpoint);
+ log_info!(logger, " Outpoint {}", outpoint);
}
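// Stash the request keyed by its timelock; it will be pulled back out and claimed once
// the chain reaches that height.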
self.locktimed_packages.entry(req.package_timelock()).or_insert(Vec::new()).push(req);
continue;
req.set_feerate(new_feerate);
let txid = tx.txid();
for k in req.outpoints() {
- log_trace!(logger, "Registering claiming request for {}:{}", k.txid, k.vout);
+ log_info!(logger, "Registering claiming request for {}:{}", k.txid, k.vout);
self.claimable_outpoints.insert(k.clone(), (txid, height));
}
self.pending_claim_requests.insert(txid, req);
- log_trace!(logger, "Broadcasting onchain {}", log_tx!(tx));
+ log_info!(logger, "Broadcasting onchain {}", log_tx!(tx));
broadcaster.broadcast_transaction(&tx);
}
}
// been aggregated in a single tx and claimed so atomically
if let Some(request) = self.pending_claim_requests.remove(&claim_request) {
for outpoint in request.outpoints() {
+ log_debug!(logger, "Removing claim tracking for {} due to maturation of claim tx {}.", outpoint, claim_request);
self.claimable_outpoints.remove(&outpoint);
}
}
},
OnchainEvent::ContentiousOutpoint { package } => {
+ log_debug!(logger, "Removing claim tracking due to maturation of claim tx for outpoints:");
+ log_debug!(logger, " {:?}", package.outpoints());
self.claimable_outpoints.remove(&package.outpoints()[0]);
}
}
log_trace!(logger, "Bumping {} candidates", bump_candidates.len());
for (first_claim_txid, request) in bump_candidates.iter() {
if let Some((new_timer, new_feerate, bump_tx)) = self.generate_claim_tx(height, &request, &*fee_estimator, &*logger) {
- log_trace!(logger, "Broadcasting onchain {}", log_tx!(bump_tx));
+ log_info!(logger, "Broadcasting RBF-bumped onchain {}", log_tx!(bump_tx));
broadcaster.broadcast_transaction(&bump_tx);
if let Some(request) = self.pending_claim_requests.get_mut(first_claim_txid) {
request.set_timer(new_timer);
});
}
for (i, (outpoint, out)) in self.inputs.iter().enumerate() {
- log_trace!(logger, "Adding claiming input for outpoint {}:{}", outpoint.txid, outpoint.vout);
+ log_debug!(logger, "Adding claiming input for outpoint {}:{}", outpoint.txid, outpoint.vout);
if !out.finalize_input(&mut bumped_tx, i, onchain_handler) { return None; }
}
- log_trace!(logger, "Finalized transaction {} ready to broadcast", bumped_tx.txid());
+ log_debug!(logger, "Finalized transaction {} ready to broadcast", bumped_tx.txid());
return Some(bumped_tx);
},
PackageMalleability::Untractable => {
debug_assert_eq!(value, 0, "value is ignored for non-malleable packages, should be zero to ensure callsites are correct");
if let Some((outpoint, outp)) = self.inputs.first() {
if let Some(final_tx) = outp.get_finalized_tx(outpoint, onchain_handler) {
- log_trace!(logger, "Adding claiming input for outpoint {}:{}", outpoint.txid, outpoint.vout);
- log_trace!(logger, "Finalized transaction {} ready to broadcast", final_tx.txid());
+ log_debug!(logger, "Adding claiming input for outpoint {}:{}", outpoint.txid, outpoint.vout);
+ log_debug!(logger, "Finalized transaction {} ready to broadcast", final_tx.txid());
return Some(final_tx);
}
return None;
// ...else just increase the previous feerate by 25% (because that's a nice number)
let new_fee = previous_feerate * (predicted_weight as u64) / 750;
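// Note that feerates here are per 1000 weight units, so dividing by 750 actually bumps
// the fee by about a third, slightly more than the 25% mentioned above.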
if input_amounts <= new_fee {
- log_trace!(logger, "Can't 25% bump new claiming tx, amount {} is too small", input_amounts);
+ log_warn!(logger, "Can't 25% bump new claiming tx, amount {} is too small", input_amounts);
return None;
}
new_fee
}
} else {
- log_trace!(logger, "Can't new-estimation bump new claiming tx, amount {} is too small", input_amounts);
+ log_warn!(logger, "Can't new-estimation bump new claiming tx, amount {} is too small", input_amounts);
return None;
};
let mut local_htlc_total_msat = 0;
let mut value_to_self_msat_offset = 0;
- log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for {}, generated by {} with fee {}...", commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number), get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()), if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
+ log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
+ commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
+ get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
+ log_bytes!(self.channel_id), if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
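+ // Per BOLT 3, the commitment number is obscured by XORing it with the lower 48 bits of
+ // SHA256(funding-initiator payment basepoint || fundee payment basepoint), which is what
+ // get_commitment_transaction_number_obscure_factor computes from the two payment points.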
macro_rules! get_htlc_in_commitment {
($htlc: expr, $offered: expr) => {
_ => {}
}
}
- log_trace!(logger, "Adding HTLC claim to holding_cell! Current state: {}", self.channel_state);
+ log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", log_bytes!(self.channel_id()), self.channel_state);
self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
});
debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
return Ok((None, Some(monitor_update)));
}
- log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill!", log_bytes!(htlc.payment_hash.0));
+ log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id));
htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
}
_ => {}
}
}
- log_trace!(logger, "Placing failure for HTLC ID {} in holding cell", htlc_id_arg);
+ log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, log_bytes!(self.channel_id()));
self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
htlc_id: htlc_id_arg,
err_packet,
return Ok(None);
}
- log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message", htlc_id_arg);
+ log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, log_bytes!(self.channel_id()));
{
let htlc = &mut self.pending_inbound_htlcs[pending_idx];
htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.channel_value_satoshis);
// They sign the holder commitment transaction...
- log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {}", log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]), encode::serialize_hex(&funding_script));
+ log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
+ log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.counterparty_funding_pubkey().serialize()),
+ encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
+ encode::serialize_hex(&funding_script), log_bytes!(self.channel_id()));
secp_check!(self.secp_ctx.verify(&sighash, &sig, self.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
}
let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
- log_trace!(logger, "Initial counterparty ID {} tx {}", counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
+ log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
+ log_bytes!(self.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
let counterparty_signature = self.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, &self.secp_ctx)
.map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0;
self.cur_counterparty_commitment_transaction_number -= 1;
self.cur_holder_commitment_transaction_number -= 1;
+ log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.channel_id()));
+
Ok((msgs::FundingSigned {
channel_id: self.channel_id,
signature
let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
- log_trace!(logger, "Initial counterparty ID {} tx {}", counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
+ log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
+ log_bytes!(self.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
let holder_signer = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number)?;
let initial_commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &holder_signer, true, false, self.feerate_per_kw, logger).0;
self.cur_holder_commitment_transaction_number -= 1;
self.cur_counterparty_commitment_transaction_number -= 1;
+ log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.channel_id()));
+
Ok((channel_monitor, self.funding_transaction.as_ref().cloned().unwrap()))
}
- pub fn funding_locked(&mut self, msg: &msgs::FundingLocked) -> Result<(), ChannelError> {
+ pub fn funding_locked<L: Deref>(&mut self, msg: &msgs::FundingLocked, logger: &L) -> Result<(), ChannelError> where L::Target: Logger {
if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
self.workaround_lnd_bug_4006 = Some(msg.clone());
return Err(ChannelError::Ignore("Peer sent funding_locked when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
self.counterparty_prev_commitment_point = self.counterparty_cur_commitment_point;
self.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
+
+ log_info!(logger, "Received funding_locked from peer for channel {}", log_bytes!(self.channel_id()));
+
Ok(())
}
if pending_remote_value_msat - msg.amount_msat - chan_reserve_msat < remote_fee_cost_incl_stuck_buffer_msat {
// Note that if the pending_forward_status is not updated here, then it's because we're already failing
// the HTLC, i.e. its status is already set to failing.
- log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation");
+ log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", log_bytes!(self.channel_id()));
pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
}
} else {
let bitcoin_tx = trusted_tx.built_transaction();
let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.channel_value_satoshis);
- log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {}", log_bytes!(msg.signature.serialize_compact()[..]), log_bytes!(self.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction), log_bytes!(sighash[..]), encode::serialize_hex(&funding_script));
+ log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
+ log_bytes!(msg.signature.serialize_compact()[..]),
+ log_bytes!(self.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
+ log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.channel_id()));
if let Err(_) = self.secp_ctx.verify(&sighash, &msg.signature, &self.counterparty_funding_pubkey()) {
return Err((None, ChannelError::Close("Invalid commitment tx signature from peer".to_owned())));
}
let htlc_tx = self.build_htlc_transaction(&commitment_txid, &htlc, true, &keys, feerate_per_kw);
let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &keys);
let htlc_sighash = hash_to_message!(&bip143::SigHashCache::new(&htlc_tx).signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, SigHashType::All)[..]);
- log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {}", log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()), encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript));
+ log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
+ log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()),
+ encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.channel_id()));
if let Err(_) = self.secp_ctx.verify(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
return Err((None, ChannelError::Close("Invalid HTLC tx signature from peer".to_owned())));
}
Some(forward_info.clone())
} else { None };
if let Some(forward_info) = new_forward {
+ log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
+ log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id));
htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
need_commitment = true;
}
if let Some(fail_reason) = if let &mut OutboundHTLCState::RemoteRemoved(ref mut fail_reason) = &mut htlc.state {
Some(fail_reason.take())
} else { None } {
+ log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
+ log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id));
htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(fail_reason);
need_commitment = true;
}
self.latest_monitor_update_id = monitor_update.update_id;
monitor_update.updates.append(&mut additional_update.updates);
}
+ log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
+ log_bytes!(self.channel_id));
// TODO: Call maybe_propose_first_closing_signed on restoration (or call it here and
// re-send the message on restoration)
return Err((Some(monitor_update), ChannelError::Ignore("Previous monitor update failure prevented generation of RAA".to_owned())));
(None, self.maybe_propose_first_closing_signed(fee_estimator))
} else { (None, None) };
+ log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
+ log_bytes!(self.channel_id()), if commitment_signed.is_some() { " our own commitment_signed and" } else { "" });
+
Ok((msgs::RevokeAndACK {
channel_id: self.channel_id,
per_commitment_secret,
fn free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> Result<(Option<(msgs::CommitmentUpdate, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>), ChannelError> where L::Target: Logger {
assert_eq!(self.channel_state & ChannelState::MonitorUpdateFailed as u32, 0);
if self.holding_cell_htlc_updates.len() != 0 || self.holding_cell_update_fee.is_some() {
- log_trace!(logger, "Freeing holding cell with {} HTLC updates{}", self.holding_cell_htlc_updates.len(), if self.holding_cell_update_fee.is_some() { " and a fee update" } else { "" });
+ log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.holding_cell_htlc_updates.len(),
+ if self.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, log_bytes!(self.channel_id()));
let mut monitor_update = ChannelMonitorUpdate {
update_id: self.latest_monitor_update_id + 1, // We don't increment this yet!
Err(e) => {
match e {
ChannelError::Ignore(ref msg) => {
- log_info!(logger, "Failed to send HTLC with payment_hash {} due to {}", log_bytes!(payment_hash.0), msg);
+ log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}",
+ log_bytes!(payment_hash.0), msg, log_bytes!(self.channel_id()));
// If we fail to send here, then this HTLC should
// be failed backwards. Failing to send here
// indicates that this HTLC may keep being put back
self.latest_monitor_update_id = monitor_update.update_id;
monitor_update.updates.append(&mut additional_update.updates);
+ log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
+ log_bytes!(self.channel_id()), if update_fee.is_some() { "a fee update, " } else { "" },
+ update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len());
+
Ok((Some((msgs::CommitmentUpdate {
update_add_htlcs,
update_fulfill_htlcs,
self.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
self.cur_counterparty_commitment_transaction_number -= 1;
- log_trace!(logger, "Updating HTLCs on receipt of RAA...");
+ log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", log_bytes!(self.channel_id()));
let mut to_forward_infos = Vec::new();
let mut revoked_htlcs = Vec::new();
let mut update_fail_htlcs = Vec::new();
});
for htlc in pending_inbound_htlcs.iter_mut() {
let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
- log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to Committed", log_bytes!(htlc.payment_hash.0));
true
} else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
- log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", log_bytes!(htlc.payment_hash.0));
true
} else { false };
if swap {
mem::swap(&mut state, &mut htlc.state);
if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
+ log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
require_commitment = true;
} else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
match forward_info {
PendingHTLCStatus::Fail(fail_msg) => {
+ log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", log_bytes!(htlc.payment_hash.0));
require_commitment = true;
match fail_msg {
HTLCFailureMsg::Relay(msg) => {
}
},
PendingHTLCStatus::Forward(forward_info) => {
+ log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", log_bytes!(htlc.payment_hash.0));
to_forward_infos.push((forward_info, htlc.htlc_id));
htlc.state = InboundHTLCState::Committed;
}
}
self.monitor_pending_forwards.append(&mut to_forward_infos);
self.monitor_pending_failures.append(&mut revoked_htlcs);
+ log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.channel_id()));
return Ok((None, Vec::new(), Vec::new(), None, monitor_update, Vec::new()))
}
self.latest_monitor_update_id = monitor_update.update_id;
monitor_update.updates.append(&mut additional_update.updates);
+ log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.",
+ log_bytes!(self.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len());
Ok((Some(msgs::CommitmentUpdate {
update_add_htlcs: Vec::new(),
update_fulfill_htlcs: Vec::new(),
commitment_signed
}), to_forward_infos, revoked_htlcs, None, monitor_update, htlcs_to_fail))
} else {
+ log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.channel_id()));
Ok((None, to_forward_infos, revoked_htlcs, self.maybe_propose_first_closing_signed(fee_estimator), monitor_update, htlcs_to_fail))
}
}
}
self.channel_state |= ChannelState::PeerDisconnected as u32;
- log_debug!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.channel_id()));
+ log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.channel_id()));
}
/// Indicates that a ChannelMonitor update failed to be stored by the client and further
self.monitor_pending_revoke_and_ack = false;
self.monitor_pending_commitment_signed = false;
let order = self.resend_order.clone();
- log_trace!(logger, "Restored monitor updating resulting in {}{} commitment update and {} RAA, with {} first",
- if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
- if commitment_update.is_some() { "a" } else { "no" },
- if raa.is_some() { "an" } else { "no" },
+ log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
+ log_bytes!(self.channel_id()), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
+ if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
(raa, commitment_update, order, forwards, failures, funding_broadcastable, funding_locked)
}
}
}
- log_trace!(logger, "Regenerated latest commitment update with {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
- update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
+ log_trace!(logger, "Regenerated latest commitment update in channel {} with {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
+ log_bytes!(self.channel_id()), update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
msgs::CommitmentUpdate {
update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs,
update_fee: None,
// send it immediately instead of waiting for a best_block_updated call (which
// may have already happened for this block).
if let Some(funding_locked) = self.check_get_funding_locked(height) {
+ log_info!(logger, "Sending a funding_locked to our peer for channel {}", log_bytes!(self.channel_id));
return Ok(Some(funding_locked));
}
}
for inp in tx.input.iter() {
if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
- log_trace!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(self.channel_id()));
+ log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(self.channel_id()));
return Err(msgs::ErrorMessage {
channel_id: self.channel_id(),
data: "Commitment or closing transaction was confirmed on chain.".to_owned()
///
/// May return some HTLCs (and their payment_hash) which have timed out and should be failed
/// back.
- pub fn best_block_updated(&mut self, height: u32, highest_header_time: u32) -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>), msgs::ErrorMessage> {
+ pub fn best_block_updated<L: Deref>(&mut self, height: u32, highest_header_time: u32, logger: &L)
+ -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>), msgs::ErrorMessage> where L::Target: Logger {
let mut timed_out_htlcs = Vec::new();
let unforwarded_htlc_cltv_limit = height + HTLC_FAIL_BACK_BUFFER;
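// Holding-cell HTLCs whose CLTV expiry falls at or below this limit are timed out and
// failed back rather than forwarded.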
self.holding_cell_htlc_updates.retain(|htlc_update| {
self.update_time_counter = cmp::max(self.update_time_counter, highest_header_time);
if let Some(funding_locked) = self.check_get_funding_locked(height) {
+ log_info!(logger, "Sending a funding_locked to our peer for channel {}", log_bytes!(self.channel_id));
return Ok((Some(funding_locked), timed_out_htlcs));
}
/// Indicates the funding transaction is no longer confirmed in the main chain. This may
/// force-close the channel, but may also indicate a harmless reorganization of a block or two
/// before the channel has reached funding_locked and we can just wait for more blocks.
- pub fn funding_transaction_unconfirmed(&mut self) -> Result<(), msgs::ErrorMessage> {
+ pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), msgs::ErrorMessage> where L::Target: Logger {
if self.funding_tx_confirmation_height != 0 {
// We handle the funding disconnection by calling best_block_updated with a height one
// below where our funding was connected, implying a reorg back to conf_height - 1.
// larger. If we don't know that time has moved forward, we can just set it to the last
// time we saw and it will be ignored.
let best_time = self.update_time_counter;
- match self.best_block_updated(reorg_height, best_time) {
+ match self.best_block_updated(reorg_height, best_time, logger) {
Ok((funding_locked, timed_out_htlcs)) => {
assert!(funding_locked.is_none(), "We can't generate a funding_locked with 0 confirmations?");
assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
let data_loss_protect = if self.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
let remote_last_secret = self.commitment_secrets.get_secret(self.cur_counterparty_commitment_transaction_number + 2).unwrap();
- log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {}", log_bytes!(remote_last_secret));
+ log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), log_bytes!(self.channel_id()));
OptionalField::Present(DataLossProtect {
your_last_per_commitment_secret: remote_last_secret,
my_current_per_commitment_point: dummy_pubkey
})
} else {
- log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret");
+ log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", log_bytes!(self.channel_id()));
OptionalField::Present(DataLossProtect {
your_last_per_commitment_secret: [0;32],
my_current_per_commitment_point: dummy_pubkey,
signature = res.0;
htlc_signatures = res.1;
- log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {}",
+ log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
encode::serialize_hex(&counterparty_commitment_tx.0.trust().built_transaction().transaction),
- &counterparty_commitment_txid,
- encode::serialize_hex(&self.get_funding_redeemscript()),
- log_bytes!(signature.serialize_compact()[..]));
+ &counterparty_commitment_txid, encode::serialize_hex(&self.get_funding_redeemscript()),
+ log_bytes!(signature.serialize_compact()[..]), log_bytes!(self.channel_id()));
for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
- log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {}",
+ log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, feerate_per_kw, self.get_holder_selected_contest_delay(), htlc, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &counterparty_keys)),
log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()),
- log_bytes!(htlc_sig.serialize_compact()[..]));
+ log_bytes!(htlc_sig.serialize_compact()[..]), log_bytes!(self.channel_id()));
}
}
(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone()))
},
ChannelError::Close(msg) => {
- log_trace!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg);
+ log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg);
if let Some(short_id) = $channel.get_short_channel_id() {
$short_to_id.remove(&short_id);
}
#[inline]
fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
let (monitor_update_option, mut failed_htlcs) = shutdown_res;
- log_trace!(self.logger, "Finishing force-closure of channel {} HTLCs to fail", failed_htlcs.len());
+ log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len());
for htlc_source in failed_htlcs.drain(..) {
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
}
return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
}
};
- log_trace!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
+ log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
self.finish_force_close_channel(chan.force_shutdown(true));
if let Ok(update) = self.get_channel_update(&chan) {
let mut channel_state = self.channel_state.lock().unwrap();
return Err(APIError::MonitorUpdateFailed);
}
+ log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id()));
channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
node_id: path.first().unwrap().pubkey,
updates: msgs::CommitmentUpdate {
onion_packet, ..
}, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value },
prev_funding_outpoint } => {
- log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", log_bytes!(payment_hash.0), prev_short_channel_id, short_chan_id);
+ log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id);
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
outpoint: prev_funding_outpoint,
panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
},
HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
- log_trace!(self.logger, "Failing HTLC back to channel with short id {} after delay", short_chan_id);
+ log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet, &self.logger) {
Err(e) => {
if let ChannelError::Ignore(msg) = e {
- log_trace!(self.logger, "Failed to fail backwards to short_id {}: {}", short_chan_id, msg);
+ log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
} else {
panic!("Stated return value requirements in get_update_fail_htlc() were not met");
}
handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
continue;
}
+ log_debug!(self.logger, "Forwarding HTLCs resulted in a commitment update with {} HTLCs added and {} HTLCs failed for channel {}",
+ add_htlc_msgs.len(), fail_htlc_msgs.len(), log_bytes!(chan.get().channel_id()));
channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
node_id: chan.get().get_counterparty_node_id(),
updates: msgs::CommitmentUpdate {
}
}
if let Some((msg, commitment_signed)) = msgs {
+ log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
+ log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id()));
channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
node_id: chan.get().get_counterparty_node_id(),
updates: msgs::CommitmentUpdate {
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
- try_chan_entry!(self, chan.get_mut().funding_locked(&msg), channel_state, chan);
+ try_chan_entry!(self, chan.get_mut().funding_locked(&msg, &self.logger), channel_state, chan);
if let Some(announcement_sigs) = self.get_announcement_sigs(chan.get()) {
log_trace!(self.logger, "Sending announcement_signatures for {} in response to funding_locked", log_bytes!(chan.get().channel_id()));
// If we see locking block before receiving remote funding_locked, we broadcast our
match channel_state.by_id.entry(chan_id) {
hash_map::Entry::Occupied(mut chan) => {
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
- // TODO: see issue #153, need a consistent behavior on obnoxious behavior from random node
- return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), chan_id));
+ if chan.get().should_announce() {
+ // If the announcement is about a channel of ours which is public, some
+ // other peer may simply be forwarding all its gossip to us. Don't provide
+ // a scary-looking error message and return Ok instead.
+ return Ok(());
+ }
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
}
try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan);
},
if let Err(_e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
unimplemented!();
}
+ log_debug!(self.logger, "Updating fee resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id()));
channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
node_id: chan.get().get_counterparty_node_id(),
updates: msgs::CommitmentUpdate {
*best_block = BestBlock::new(header.prev_blockhash, new_height)
}
- self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time));
+ self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, &self.logger));
}
}
*self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
- self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time));
+ self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, &self.logger));
macro_rules! max_time {
($timestamp: expr) => {
self.do_chain_event(None, |channel| {
if let Some(funding_txo) = channel.get_funding_txo() {
if funding_txo.txid == *txid {
- channel.funding_transaction_unconfirmed().map(|_| (None, Vec::new()))
+ channel.funding_transaction_unconfirmed(&self.logger).map(|_| (None, Vec::new()))
} else { Ok((None, Vec::new())) }
} else { Ok((None, Vec::new())) }
});
channel.get_cur_counterparty_commitment_transaction_number() < monitor.get_cur_counterparty_commitment_number() ||
channel.get_latest_monitor_update_id() > monitor.get_latest_update_id() {
// If the channel is ahead of the monitor, return InvalidValue:
+ log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
+ log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
+ log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
+ log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
+ log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
return Err(DecodeError::InvalidValue);
} else if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() ||
by_id.insert(channel.channel_id(), channel);
}
} else {
+ log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.channel_id()));
+ log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
+ log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
return Err(DecodeError::InvalidValue);
}
}
},
_ => panic!("Unexpected event"),
};
- nodes[1].logger.assert_log("lightning::ln::channel".to_string(), "Attempting to fail HTLC due to fee spike buffer violation".to_string(), 1);
+ nodes[1].logger.assert_log("lightning::ln::channel".to_string(),
+ format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", ::hex::encode(raa_msg.channel_id)), 1);
check_added_monitors!(nodes[1], 2);
}
// us to surface its failure to the user.
chan_stat = get_channel_value_stat!(nodes[0], chan.2);
assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
- nodes[0].logger.assert_log("lightning::ln::channel".to_string(), "Freeing holding cell with 1 HTLC updates".to_string(), 1);
- let failure_log = format!("Failed to send HTLC with payment_hash {} due to Cannot send value that would put our balance under counterparty-announced channel reserve value ({})", log_bytes!(our_payment_hash.0), chan_stat.channel_reserve_msat);
+ nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 1 HTLC updates in channel {}", hex::encode(chan.2)), 1);
+ let failure_log = format!("Failed to send HTLC with payment_hash {} due to Cannot send value that would put our balance under counterparty-announced channel reserve value ({}) in channel {}",
+ hex::encode(our_payment_hash.0), chan_stat.channel_reserve_msat, hex::encode(chan.2));
nodes[0].logger.assert_log("lightning::ln::channel".to_string(), failure_log.to_string(), 1);
// Check that the payment failed to be sent out.
// to surface its failure to the user. The first payment should succeed.
chan_stat = get_channel_value_stat!(nodes[0], chan.2);
assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
- nodes[0].logger.assert_log("lightning::ln::channel".to_string(), "Freeing holding cell with 2 HTLC updates".to_string(), 1);
- let failure_log = format!("Failed to send HTLC with payment_hash {} due to Cannot send value that would put our balance under counterparty-announced channel reserve value ({})", log_bytes!(payment_hash_2.0), chan_stat.channel_reserve_msat);
+ nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 2 HTLC updates in channel {}", hex::encode(chan.2)), 1);
+ let failure_log = format!("Failed to send HTLC with payment_hash {} due to Cannot send value that would put our balance under counterparty-announced channel reserve value ({}) in channel {}",
+ hex::encode(payment_hash_2.0), chan_stat.channel_reserve_msat, hex::encode(chan.2));
nodes[0].logger.assert_log("lightning::ln::channel".to_string(), failure_log.to_string(), 1);
// Check that the second payment failed to be sent out.
use std::io::Read;
use util::events::MessageSendEventsProvider;
+use util::logger;
use util::ser::{Readable, Writeable, Writer, FixedLengthReader, HighZeroBytesDroppedVarInt};
use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
msg: Option<ErrorMessage>
},
/// The peer did something harmless that we weren't able to process, just log and ignore
+ // New code should *not* use this. New code must use IgnoreAndLog, below!
IgnoreError,
+ /// The peer did something harmless that we weren't able to meaningfully process.
+ /// If the error is logged, log it at the given level.
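+ /// For example, a handler receiving duplicate gossip might return a LightningError with
+ /// `action: ErrorAction::IgnoreAndLog(logger::Level::Trace)` so the duplicate is noted
+ /// in the logs without affecting the connection.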
+ IgnoreAndLog(logger::Level),
/// The peer did something incorrect. Tell them.
SendErrorMessage {
/// The message to send.
use ln::wire::Encode;
use util::byte_utils;
use util::events::{MessageSendEvent, MessageSendEventsProvider};
-use util::logger::Logger;
+use util::logger::{Logger, Level};
use routing::network_graph::NetGraphMsgHandler;
use prelude::*;
use alloc::collections::LinkedList;
+use alloc::fmt::Debug;
use std::sync::{Arc, Mutex};
use core::sync::atomic::{AtomicUsize, Ordering};
use core::{cmp, hash, fmt, mem};
}
fn do_attempt_write_data(&self, descriptor: &mut Descriptor, peer: &mut Peer) {
- macro_rules! encode_and_send_msg {
- ($msg: expr) => {
- {
- log_trace!(self.logger, "Encoding and sending sync update message of type {} to {}", $msg.type_id(), log_pubkey!(peer.their_node_id.unwrap()));
- peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!($msg)[..]));
- }
- }
- }
while !peer.awaiting_write_event {
if peer.pending_outbound_buffer.len() < OUTBOUND_BUFFER_LIMIT_READ_PAUSE {
match peer.sync_status {
let steps = ((OUTBOUND_BUFFER_LIMIT_READ_PAUSE - peer.pending_outbound_buffer.len() + 2) / 3) as u8;
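// Ceiling division: each channel_announcement may be followed by two channel_updates,
// so one "step" can enqueue up to three messages.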
let all_messages = self.message_handler.route_handler.get_next_channel_announcements(c, steps);
for &(ref announce, ref update_a_option, ref update_b_option) in all_messages.iter() {
- encode_and_send_msg!(announce);
+ self.enqueue_message(peer, announce);
if let &Some(ref update_a) = update_a_option {
- encode_and_send_msg!(update_a);
+ self.enqueue_message(peer, update_a);
}
if let &Some(ref update_b) = update_b_option {
- encode_and_send_msg!(update_b);
+ self.enqueue_message(peer, update_b);
}
peer.sync_status = InitSyncTracker::ChannelsSyncing(announce.contents.short_channel_id + 1);
}
let steps = (OUTBOUND_BUFFER_LIMIT_READ_PAUSE - peer.pending_outbound_buffer.len()) as u8;
let all_messages = self.message_handler.route_handler.get_next_node_announcements(None, steps);
for msg in all_messages.iter() {
- encode_and_send_msg!(msg);
+ self.enqueue_message(peer, msg);
peer.sync_status = InitSyncTracker::NodesSyncing(msg.contents.node_id);
}
if all_messages.is_empty() || all_messages.len() != steps as usize {
let steps = (OUTBOUND_BUFFER_LIMIT_READ_PAUSE - peer.pending_outbound_buffer.len()) as u8;
let all_messages = self.message_handler.route_handler.get_next_node_announcements(Some(&key), steps);
for msg in all_messages.iter() {
- encode_and_send_msg!(msg);
+ self.enqueue_message(peer, msg);
peer.sync_status = InitSyncTracker::NodesSyncing(msg.contents.node_id);
}
if all_messages.is_empty() || all_messages.len() != steps as usize {
}
/// Append a message to a peer's pending outbound/write buffer, and update the map of peers needing sends accordingly.
- fn enqueue_message<M: Encode + Writeable>(&self, peer: &mut Peer, message: &M) {
+ fn enqueue_message<M: Encode + Writeable + Debug>(&self, peer: &mut Peer, message: &M) {
let mut buffer = VecWriter(Vec::new());
wire::write(message, &mut buffer).unwrap(); // crash if the write failed
let encoded_message = buffer.0;
- log_trace!(self.logger, "Enqueueing message of type {} to {}", message.type_id(), log_pubkey!(peer.their_node_id.unwrap()));
+ log_trace!(self.logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap()));
peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encoded_message[..]));
}
match e.action {
msgs::ErrorAction::DisconnectPeer { msg: _ } => {
//TODO: Try to push msg
- log_trace!(self.logger, "Got Err handling message, disconnecting peer because {}", e.err);
+ log_debug!(self.logger, "Error handling message; disconnecting peer with: {}", e.err);
return Err(PeerHandleError{ no_connection_possible: false });
},
+ msgs::ErrorAction::IgnoreAndLog(level) => {
+ log_given_level!(self.logger, level, "Error handling message; ignoring: {}", e.err);
+ continue
+ },
msgs::ErrorAction::IgnoreError => {
- log_trace!(self.logger, "Got Err handling message, ignoring because {}", e.err);
+ log_debug!(self.logger, "Error handling message; ignoring: {}", e.err);
continue;
},
msgs::ErrorAction::SendErrorMessage { msg } => {
- log_trace!(self.logger, "Got Err handling message, sending Error message because {}", e.err);
+ log_debug!(self.logger, "Error handling message; sending error message with: {}", e.err);
self.enqueue_message(peer, &msg);
continue;
},
return Err(PeerHandleError{ no_connection_possible: false })
},
hash_map::Entry::Vacant(entry) => {
- log_trace!(self.logger, "Finished noise handshake for connection with {}", log_pubkey!(peer.their_node_id.unwrap()));
+ log_debug!(self.logger, "Finished noise handshake for connection with {}", log_pubkey!(peer.their_node_id.unwrap()));
entry.insert(peer_descriptor.clone())
},
};
match e {
msgs::DecodeError::UnknownVersion => return Err(PeerHandleError { no_connection_possible: false }),
msgs::DecodeError::UnknownRequiredFeature => {
- log_debug!(self.logger, "Got a channel/node announcement with an known required feature flag, you may want to update!");
+ log_trace!(self.logger, "Got a channel/node announcement with an known required feature flag, you may want to update!");
continue;
}
msgs::DecodeError::InvalidValue => {
msgs::DecodeError::BadLengthDescriptor => return Err(PeerHandleError { no_connection_possible: false }),
msgs::DecodeError::Io(_) => return Err(PeerHandleError { no_connection_possible: false }),
msgs::DecodeError::UnsupportedCompression => {
- log_debug!(self.logger, "We don't support zlib-compressed message fields, ignoring message");
+ log_trace!(self.logger, "We don't support zlib-compressed message fields, ignoring message");
continue;
}
}
/// Process an incoming message and return a decision (ok, lightning error, peer handling error) regarding the next action with the peer
/// Returns the message back if it needs to be broadcasted to all other peers.
fn handle_message(&self, peer: &mut Peer, message: wire::Message) -> Result<Option<wire::Message>, MessageHandlingError> {
- log_trace!(self.logger, "Received message of type {} from {}", message.type_id(), log_pubkey!(peer.their_node_id.unwrap()));
+ log_trace!(self.logger, "Received message {:?} from {}", message, log_pubkey!(peer.their_node_id.unwrap()));
// Need an Init as first message
if let wire::Message::Init(_) = message {
} else if peer.their_features.is_none() {
- log_trace!(self.logger, "Peer {} sent non-Init first message", log_pubkey!(peer.their_node_id.unwrap()));
+ log_debug!(self.logger, "Peer {} sent non-Init first message", log_pubkey!(peer.their_node_id.unwrap()));
return Err(PeerHandleError{ no_connection_possible: false }.into());
}
// Setup and Control messages:
wire::Message::Init(msg) => {
if msg.features.requires_unknown_bits() {
- log_info!(self.logger, "Peer features required unknown version bits");
+ log_debug!(self.logger, "Peer features required unknown version bits");
return Err(PeerHandleError{ no_connection_possible: true }.into());
}
if peer.their_features.is_some() {
fn forward_broadcast_msg(&self, peers: &mut PeerHolder<Descriptor>, msg: &wire::Message, except_node: Option<&PublicKey>) {
match msg {
wire::Message::ChannelAnnouncement(ref msg) => {
+ log_trace!(self.logger, "Sending message to all peers except {:?} or the announced channel's counterparties: {:?}", except_node, msg);
let encoded_msg = encode_msg!(msg);
for (_, peer) in peers.peers.iter_mut() {
continue
}
if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP {
+ log_trace!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
continue;
}
if peer.their_node_id.as_ref() == Some(&msg.contents.node_id_1) ||
}
},
wire::Message::NodeAnnouncement(ref msg) => {
+ log_trace!(self.logger, "Sending message to all peers except {:?} or the announced node: {:?}", except_node, msg);
let encoded_msg = encode_msg!(msg);
for (_, peer) in peers.peers.iter_mut() {
continue
}
if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP {
+ log_trace!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
continue;
}
if peer.their_node_id.as_ref() == Some(&msg.contents.node_id) {
}
},
wire::Message::ChannelUpdate(ref msg) => {
+ log_trace!(self.logger, "Sending message to all peers except {:?}: {:?}", except_node, msg);
let encoded_msg = encode_msg!(msg);
for (_, peer) in peers.peers.iter_mut() {
continue
}
if peer.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP {
+ log_trace!(self.logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id);
continue;
}
if except_node.is_some() && peer.their_node_id.as_ref() == except_node {
}
match event {
MessageSendEvent::SendAcceptChannel { ref node_id, ref msg } => {
- log_trace!(self.logger, "Handling SendAcceptChannel event in peer_handler for node {} for channel {}",
+ log_debug!(self.logger, "Handling SendAcceptChannel event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
log_bytes!(msg.temporary_channel_id));
- let peer = get_peer_for_forwarding!(node_id);
- peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg)));
+ self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendOpenChannel { ref node_id, ref msg } => {
- log_trace!(self.logger, "Handling SendOpenChannel event in peer_handler for node {} for channel {}",
+ log_debug!(self.logger, "Handling SendOpenChannel event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
log_bytes!(msg.temporary_channel_id));
- let peer = get_peer_for_forwarding!(node_id);
- peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg)));
+ self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendFundingCreated { ref node_id, ref msg } => {
- log_trace!(self.logger, "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})",
+ log_debug!(self.logger, "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})",
log_pubkey!(node_id),
log_bytes!(msg.temporary_channel_id),
log_funding_channel_id!(msg.funding_txid, msg.funding_output_index));
// TODO: If the peer is gone we should generate a DiscardFunding event
// indicating to the wallet that they should just throw away this funding transaction
- let peer = get_peer_for_forwarding!(node_id);
- peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg)));
+ self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendFundingSigned { ref node_id, ref msg } => {
- log_trace!(self.logger, "Handling SendFundingSigned event in peer_handler for node {} for channel {}",
+ log_debug!(self.logger, "Handling SendFundingSigned event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
log_bytes!(msg.channel_id));
- let peer = get_peer_for_forwarding!(node_id);
- peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg)));
+ self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendFundingLocked { ref node_id, ref msg } => {
- log_trace!(self.logger, "Handling SendFundingLocked event in peer_handler for node {} for channel {}",
+ log_debug!(self.logger, "Handling SendFundingLocked event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
log_bytes!(msg.channel_id));
- let peer = get_peer_for_forwarding!(node_id);
- peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg)));
+ self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
- log_trace!(self.logger, "Handling SendAnnouncementSignatures event in peer_handler for node {} for channel {})",
+ log_debug!(self.logger, "Handling SendAnnouncementSignatures event in peer_handler for node {} for channel {})",
log_pubkey!(node_id),
log_bytes!(msg.channel_id));
- let peer = get_peer_for_forwarding!(node_id);
- peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg)));
+ self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
- log_trace!(self.logger, "Handling UpdateHTLCs event in peer_handler for node {} with {} adds, {} fulfills, {} fails for channel {}",
+ log_debug!(self.logger, "Handling UpdateHTLCs event in peer_handler for node {} with {} adds, {} fulfills, {} fails for channel {}",
log_pubkey!(node_id),
update_add_htlcs.len(),
update_fulfill_htlcs.len(),
update_fail_htlcs.len(),
log_bytes!(commitment_signed.channel_id));
let peer = get_peer_for_forwarding!(node_id);
for msg in update_add_htlcs {
- peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg)));
+ self.enqueue_message(peer, msg);
}
for msg in update_fulfill_htlcs {
- peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg)));
+ self.enqueue_message(peer, msg);
}
for msg in update_fail_htlcs {
- peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg)));
+ self.enqueue_message(peer, msg);
}
for msg in update_fail_malformed_htlcs {
- peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg)));
+ self.enqueue_message(peer, msg);
}
if let &Some(ref msg) = update_fee {
- peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg)));
+ self.enqueue_message(peer, msg);
}
- peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(commitment_signed)));
+ self.enqueue_message(peer, commitment_signed);
},
MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
- log_trace!(self.logger, "Handling SendRevokeAndACK event in peer_handler for node {} for channel {}",
+ log_debug!(self.logger, "Handling SendRevokeAndACK event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
log_bytes!(msg.channel_id));
- let peer = get_peer_for_forwarding!(node_id);
- peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg)));
+ self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
- log_trace!(self.logger, "Handling SendClosingSigned event in peer_handler for node {} for channel {}",
+ log_debug!(self.logger, "Handling SendClosingSigned event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
log_bytes!(msg.channel_id));
- let peer = get_peer_for_forwarding!(node_id);
- peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg)));
+ self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendShutdown { ref node_id, ref msg } => {
- log_trace!(self.logger, "Handling Shutdown event in peer_handler for node {} for channel {}",
+ log_debug!(self.logger, "Handling Shutdown event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
log_bytes!(msg.channel_id));
- let peer = get_peer_for_forwarding!(node_id);
- peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg)));
+ self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
- log_trace!(self.logger, "Handling SendChannelReestablish event in peer_handler for node {} for channel {}",
+ log_debug!(self.logger, "Handling SendChannelReestablish event in peer_handler for node {} for channel {}",
log_pubkey!(node_id),
log_bytes!(msg.channel_id));
- let peer = get_peer_for_forwarding!(node_id);
- peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg)));
+ self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::BroadcastChannelAnnouncement { msg, update_msg } => {
- log_trace!(self.logger, "Handling BroadcastChannelAnnouncement event in peer_handler for short channel id {}", msg.contents.short_channel_id);
+ log_debug!(self.logger, "Handling BroadcastChannelAnnouncement event in peer_handler for short channel id {}", msg.contents.short_channel_id);
if self.message_handler.route_handler.handle_channel_announcement(&msg).is_ok() && self.message_handler.route_handler.handle_channel_update(&update_msg).is_ok() {
self.forward_broadcast_msg(peers, &wire::Message::ChannelAnnouncement(msg), None);
self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(update_msg), None);
}
},
MessageSendEvent::BroadcastNodeAnnouncement { msg } => {
- log_trace!(self.logger, "Handling BroadcastNodeAnnouncement event in peer_handler");
+ log_debug!(self.logger, "Handling BroadcastNodeAnnouncement event in peer_handler");
if self.message_handler.route_handler.handle_node_announcement(&msg).is_ok() {
self.forward_broadcast_msg(peers, &wire::Message::NodeAnnouncement(msg), None);
}
},
MessageSendEvent::BroadcastChannelUpdate { msg } => {
- log_trace!(self.logger, "Handling BroadcastChannelUpdate event in peer_handler for short channel id {}", msg.contents.short_channel_id);
+ log_debug!(self.logger, "Handling BroadcastChannelUpdate event in peer_handler for short channel id {}", msg.contents.short_channel_id);
if self.message_handler.route_handler.handle_channel_update(&msg).is_ok() {
self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(msg), None);
}
log_trace!(self.logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
log_pubkey!(node_id),
msg.data);
- peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg)));
+ self.enqueue_message(&mut peer, msg);
// This isn't guaranteed to work, but if there is enough free
// room in the send buffer, put the error message there...
self.do_attempt_write_data(&mut descriptor, &mut peer);
self.message_handler.chan_handler.peer_disconnected(&node_id, false);
}
},
- msgs::ErrorAction::IgnoreError => {},
+ msgs::ErrorAction::IgnoreAndLog(level) => {
+ log_given_level!(self.logger, level, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id));
+ },
+ msgs::ErrorAction::IgnoreError => {
+ log_debug!(self.logger, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id));
+ },
msgs::ErrorAction::SendErrorMessage { ref msg } => {
log_trace!(self.logger, "Handling SendErrorMessage HandleError event in peer_handler for node {} with message {}",
log_pubkey!(node_id),
msg.data);
- let peer = get_peer_for_forwarding!(node_id);
- peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg)));
+ self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
},
}
},
MessageSendEvent::SendChannelRangeQuery { ref node_id, ref msg } => {
- let peer = get_peer_for_forwarding!(node_id);
- peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg)));
+ self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
},
MessageSendEvent::SendShortIdsQuery { ref node_id, ref msg } => {
- let peer = get_peer_for_forwarding!(node_id);
- peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg)));
+ self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
}
MessageSendEvent::SendReplyChannelRange { ref node_id, ref msg } => {
log_trace!(self.logger, "Handling SendReplyChannelRange event in peer_handler for node {} with num_scids={} first_blocknum={} number_of_blocks={}, sync_complete={}",
log_pubkey!(node_id),
msg.short_channel_ids.len(),
msg.first_blocknum,
msg.number_of_blocks,
msg.sync_complete);
- let peer = get_peer_for_forwarding!(node_id);
- peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg)));
+ self.enqueue_message(get_peer_for_forwarding!(node_id), msg);
}
}
}
ponglen: 0,
byteslen: 64,
};
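+ // Per BOLT 1, `ponglen` is the number of ignored bytes we ask the peer to echo in
+ // its pong (none here), while `byteslen` pads this ping with 64 ignored bytes.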
- peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(&ping)));
+ self.enqueue_message(peer, &ping);
let mut descriptor_clone = descriptor.clone();
self.do_attempt_write_data(&mut descriptor_clone, peer);
/// A Lightning message returned by [`read()`] when decoding bytes received over the wire. Each
/// variant contains a message from [`msgs`] or otherwise the message type if unknown.
#[allow(missing_docs)]
+#[derive(Debug)]
pub enum Message {
Init(msgs::Init),
Error(msgs::ErrorMessage),
}
/// A number identifying a message to determine how it is encoded on the wire.
-#[derive(Clone, Copy)]
+#[derive(Clone, Copy, Debug)]
pub struct MessageType(u16);
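+// The Debug derives on Message and MessageType exist so the peer handler can log
+// entire messages with `{:?}` (see the "Received message {:?} from {}" trace above).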
impl Message {
+ #[allow(dead_code)] // This method is only used in tests
/// Returns the type that was used to decode the message payload.
pub fn type_id(&self) -> MessageType {
match self {
use ln::msgs::{QueryChannelRange, ReplyChannelRange, QueryShortChannelIds, ReplyShortChannelIdsEnd};
use ln::msgs;
use util::ser::{Writeable, Readable, Writer};
-use util::logger::Logger;
+use util::logger::{Logger, Level};
use util::events::{MessageSendEvent, MessageSendEventsProvider};
use util::scid_utils::{block_from_scid, scid_from_parts, MAX_SCID_BLOCK};
fn handle_htlc_fail_channel_update(&self, update: &msgs::HTLCFailChannelUpdate) {
match update {
&msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg } => {
+ let chan_enabled = msg.contents.flags & (1 << 1) != (1 << 1);
+ log_debug!(self.logger, "Updating channel with channel_update from a payment failure. Channel {} is {}abled.", msg.contents.short_channel_id, if chan_enabled { "en" } else { "dis" });
let _ = self.network_graph.write().unwrap().update_channel(msg, &self.secp_ctx);
},
&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id, is_permanent } => {
+ log_debug!(self.logger, "{} channel graph entry for {} due to a payment failure.", if is_permanent { "Removing" } else { "Disabling" }, short_channel_id);
self.network_graph.write().unwrap().close_channel_from_update(short_channel_id, is_permanent);
},
&msgs::HTLCFailChannelUpdate::NodeFailure { ref node_id, is_permanent } => {
+ log_debug!(self.logger, "{} node graph entry for {} due to a payment failure.", if is_permanent { "Removing" } else { "Disabling" }, node_id);
self.network_graph.write().unwrap().fail_node(node_id, is_permanent);
},
}
Some(node) => {
if let Some(node_info) = node.announcement_info.as_ref() {
if node_info.last_update >= msg.timestamp {
- return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreError});
+ return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Trace)});
}
}
Self::remove_channel_in_nodes(&mut self.nodes, &entry.get(), msg.short_channel_id);
*entry.get_mut() = chan_info;
} else {
- return Err(LightningError{err: "Already have knowledge of channel".to_owned(), action: ErrorAction::IgnoreError})
+ return Err(LightningError{err: "Already have knowledge of channel".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Trace)})
}
},
BtreeEntry::Vacant(entry) => {
( $target: expr, $src_node: expr) => {
if let Some(existing_chan_info) = $target.as_ref() {
if existing_chan_info.last_update >= msg.timestamp {
- return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreError});
+ return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreAndLog(Level::Trace)});
}
chan_was_enabled = existing_chan_info.enabled;
} else {
/// so that we can choose cheaper paths (as per Dijkstra's algorithm).
/// Fee values should be updated only in the context of the whole path, see update_value_and_recompute_fees.
/// These fee values are useful to choose hops as we traverse the graph "payee-to-payer".
-#[derive(Clone)]
+#[derive(Clone, Debug)]
struct PathBuildingHop<'a> {
// The RouteHintHop fields which will eventually be used if this hop is used in a final Route.
// Note that node_features is calculated separately after our initial graph walk.
// - when we want to stop looking for new paths.
let mut already_collected_value_msat = 0;
+ log_trace!(logger, "Building path from {} (payee) to {} (us/payer) for value {} msat.", payee, our_node_id, final_value_msat);
+
macro_rules! add_entry {
// Adds entry which goes from $src_node_id to $dest_node_id
// over the channel with id $chan_id with fees described in
}
}
+ log_trace!(logger, "Starting main path collection loop with {} nodes pre-filled from first/last hops.", targets.len());
+
// At this point, targets are filled with the data from first and
// last hops communicated by the caller, and the payment receiver.
let mut found_new_path = false;
ordered_hops.last_mut().unwrap().0.hop_use_fee_msat = 0;
ordered_hops.last_mut().unwrap().0.cltv_expiry_delta = final_cltv;
+ log_trace!(logger, "Found a path back to us from the target with {} hops contributing up to {} msat: {:?}",
+ ordered_hops.len(), value_contribution_msat, ordered_hops);
+
let mut payment_path = PaymentPath {hops: ordered_hops};
// We could have possibly constructed a slightly inconsistent path: since we reduce
// If we weren't capped by hitting a liquidity limit on a channel in the path,
// we'll probably end up picking the same path again on the next iteration.
// Decrease the available liquidity of a hop in the middle of the path.
- let victim_liquidity = bookkeeped_channels_liquidity_available_msat.get_mut(
- &payment_path.hops[(payment_path.hops.len() - 1) / 2].0.short_channel_id).unwrap();
+ let victim_scid = payment_path.hops[(payment_path.hops.len() - 1) / 2].0.short_channel_id;
+ log_trace!(logger, "Disabling channel {} for future path building iterations to avoid duplicates.", victim_scid);
+ let victim_liquidity = bookkeeped_channels_liquidity_available_msat.get_mut(&victim_scid).unwrap();
*victim_liquidity = 0;
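+ // Zeroing the middle hop's tracked liquidity forces the next graph walk to route
+ // around this channel, so successive iterations collect distinct paths.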
}
// In the latter case, making another path finding attempt won't help,
// because we deterministically terminated the search due to low liquidity.
if already_collected_value_msat >= recommended_value_msat || !found_new_path {
+ log_trace!(logger, "Have now collected {} msat (seeking {} msat) in paths. Last path loop {} a new path.",
+ already_collected_value_msat, recommended_value_msat, if found_new_path { "found" } else { "did not find" });
break 'paths_collection;
} else if found_new_path && already_collected_value_msat == final_value_msat && payment_paths.len() == 1 {
// Further, if this was our first walk of the graph, and we weren't limited by an
// potentially allowing us to pay fees to meet the htlc_minimum on the new path while
// still keeping a lower total fee than this path.
if !hit_minimum_limit {
+ log_trace!(logger, "Collected exactly our payment amount on the first pass, without hitting an htlc_minimum_msat limit, exiting.");
break 'paths_collection;
}
+ log_trace!(logger, "Collected our payment amount on the first pass, but running again to collect extra paths with a potentially higher limit.");
path_value_msat = recommended_value_msat;
}
}
}
let route = Route { paths: selected_paths };
- log_trace!(logger, "Got route: {}", log_route!(route));
+ log_info!(logger, "Got route to {}: {}", payee, log_route!(route));
Ok(route)
}
use core::cmp;
use core::fmt;
-static LOG_LEVEL_NAMES: [&'static str; 6] = ["OFF", "ERROR", "WARN", "INFO", "DEBUG", "TRACE"];
+static LOG_LEVEL_NAMES: [&'static str; 5] = ["TRACE", "DEBUG", "INFO", "WARN", "ERROR"];
/// An enum representing the available verbosity levels of the logger.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
pub enum Level {
- ///Designates logger being silent
- Off,
- /// Designates very serious errors
- Error,
- /// Designates hazardous situations
- Warn,
- /// Designates useful information
- Info,
- /// Designates lower priority information
- Debug,
/// Designates very low priority, often extremely verbose, information
Trace,
+ /// Designates lower priority information
+ Debug,
+ /// Designates useful information
+ Info,
+ /// Designates hazardous situations
+ Warn,
+ /// Designates very serious errors
+ Error,
}
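+// A note on the reordering above: with variants declared least- to most-severe, an
+// ordering that follows declaration order (the PartialOrd impl below is assumed to
+// compare the enum discriminants) yields Trace < Debug < Info < Warn < Error, which
+// test_log_ordering exercises exhaustively.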
impl PartialOrd for Level {
let wrapper = WrapperLog::new(Arc::clone(&logger));
wrapper.call_macros();
}
+
+ #[test]
+ fn test_log_ordering() {
+ assert!(Level::Error > Level::Warn);
+ assert!(Level::Error >= Level::Warn);
+ assert!(Level::Error >= Level::Error);
+ assert!(Level::Warn > Level::Info);
+ assert!(Level::Warn >= Level::Info);
+ assert!(Level::Warn >= Level::Warn);
+ assert!(Level::Info > Level::Debug);
+ assert!(Level::Info >= Level::Debug);
+ assert!(Level::Info >= Level::Info);
+ assert!(Level::Debug > Level::Trace);
+ assert!(Level::Debug >= Level::Trace);
+ assert!(Level::Debug >= Level::Debug);
+ assert!(Level::Trace >= Level::Trace);
+
+ assert!(Level::Error <= Level::Error);
+ assert!(Level::Warn < Level::Error);
+ assert!(Level::Warn <= Level::Error);
+ assert!(Level::Warn <= Level::Warn);
+ assert!(Level::Info < Level::Warn);
+ assert!(Level::Info <= Level::Warn);
+ assert!(Level::Info <= Level::Info);
+ assert!(Level::Debug < Level::Info);
+ assert!(Level::Debug <= Level::Info);
+ assert!(Level::Debug <= Level::Debug);
+ assert!(Level::Trace < Level::Debug);
+ assert!(Level::Trace <= Level::Debug);
+ assert!(Level::Trace <= Level::Trace);
+ }
}
);
}
+/// Logs an entry at the given level.
+#[macro_export]
+macro_rules! log_given_level {
+ ($logger: expr, $lvl:expr, $($arg:tt)+) => (
+ match $lvl {
+ #[cfg(not(any(feature = "max_level_off")))]
+ $crate::util::logger::Level::Error => log_internal!($logger, $lvl, $($arg)*),
+ #[cfg(not(any(feature = "max_level_off", feature = "max_level_error")))]
+ $crate::util::logger::Level::Warn => log_internal!($logger, $lvl, $($arg)*),
+ #[cfg(not(any(feature = "max_level_off", feature = "max_level_error", feature = "max_level_warn")))]
+ $crate::util::logger::Level::Info => log_internal!($logger, $lvl, $($arg)*),
+ #[cfg(not(any(feature = "max_level_off", feature = "max_level_error", feature = "max_level_warn", feature = "max_level_info")))]
+ $crate::util::logger::Level::Debug => log_internal!($logger, $lvl, $($arg)*),
+ #[cfg(not(any(feature = "max_level_off", feature = "max_level_error", feature = "max_level_warn", feature = "max_level_info", feature = "max_level_debug")))]
+ $crate::util::logger::Level::Trace => log_internal!($logger, $lvl, $($arg)*),
+
+ #[cfg(any(feature = "max_level_off", feature = "max_level_error", feature = "max_level_warn", feature = "max_level_info", feature = "max_level_debug"))]
+ _ => {
+ // The level is disabled at compile-time
+ },
+ }
+ );
+}
+
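+// Illustrative usage (a sketch, not new API surface): call sites that carry a runtime
+// `Level`, such as the `ErrorAction::IgnoreAndLog(level)` arm in the peer handler, can
+// now dispatch directly instead of matching on the level and picking a per-level macro:
+//
+//     log_given_level!(self.logger, level, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id));
+//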
/// Log an error.
#[macro_export]
macro_rules! log_error {
($logger: expr, $($arg:tt)*) => (
- #[cfg(not(any(feature = "max_level_off")))]
- log_internal!($logger, $crate::util::logger::Level::Error, $($arg)*);
+ log_given_level!($logger, $crate::util::logger::Level::Error, $($arg)*);
)
}
macro_rules! log_warn {
($logger: expr, $($arg:tt)*) => (
- #[cfg(not(any(feature = "max_level_off", feature = "max_level_error")))]
- log_internal!($logger, $crate::util::logger::Level::Warn, $($arg)*);
+ log_given_level!($logger, $crate::util::logger::Level::Warn, $($arg)*);
)
}
macro_rules! log_info {
($logger: expr, $($arg:tt)*) => (
- #[cfg(not(any(feature = "max_level_off", feature = "max_level_error", feature = "max_level_warn")))]
- log_internal!($logger, $crate::util::logger::Level::Info, $($arg)*);
+ log_given_level!($logger, $crate::util::logger::Level::Info, $($arg)*);
)
}
macro_rules! log_debug {
($logger: expr, $($arg:tt)*) => (
- #[cfg(not(any(feature = "max_level_off", feature = "max_level_error", feature = "max_level_warn", feature = "max_level_info")))]
- log_internal!($logger, $crate::util::logger::Level::Debug, $($arg)*);
+ log_given_level!($logger, $crate::util::logger::Level::Debug, $($arg)*);
)
}
#[macro_export]
macro_rules! log_trace {
($logger: expr, $($arg:tt)*) => (
- #[cfg(not(any(feature = "max_level_off", feature = "max_level_error", feature = "max_level_warn", feature = "max_level_info", feature = "max_level_debug")))]
- log_internal!($logger, $crate::util::logger::Level::Trace, $($arg)*);
+ log_given_level!($logger, $crate::util::logger::Level::Trace, $($arg)*);
)
}
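+// All per-level macros are now thin wrappers around log_given_level!, so the
+// max_level_* compile-time gating lives in one place: with e.g. `max_level_info`
+// enabled, the Debug and Trace match arms are compiled out and those calls fall
+// through to the empty `_` arm, compiling to nothing.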