};
let mut deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>::
read(&mut Cursor::new(&map_entry.get().1)).unwrap().1;
- deserialized_monitor.update_monitor(&update, &&TestBroadcaster {}, &self.logger).unwrap();
+ deserialized_monitor.update_monitor(&update, &&TestBroadcaster{}, &&FuzzEstimator{}, &self.logger).unwrap();
let mut ser = VecWriter(Vec::new());
deserialized_monitor.serialize_for_disk(&mut ser).unwrap();
map_entry.insert((update.update_id, ser.0));
},
Some(orig_monitor) => {
log_trace!(self.logger, "Updating Channel Monitor for channel {}", log_funding_info!(orig_monitor));
- let update_res = orig_monitor.update_monitor(&update, &self.broadcaster, &self.logger);
+ let update_res = orig_monitor.update_monitor(&update, &self.broadcaster, &self.fee_estimator, &self.logger);
if let Err(e) = &update_res {
log_error!(self.logger, "Failed to update channel monitor: {:?}", e);
}
pub(crate) updates: Vec<ChannelMonitorUpdateStep>,
/// The sequence number of this update. Updates *must* be replayed in-order according to this
/// sequence number (and updates may panic if they are not). The update_id values are strictly
- /// increasing and increase by one for each new update.
+ /// increasing and increase by one for each new update, with one exception specified below.
///
/// This sequence number is also used to track up to which points updates which returned
/// ChannelMonitorUpdateErr::TemporaryFailure have been applied to all copies of a given
/// ChannelMonitor when ChannelManager::channel_monitor_updated is called.
+ ///
+ /// The only instance where update_id values are not strictly increasing is the case where we
+ /// allow post-force-close updates with a special update ID of [`CLOSED_CHANNEL_UPDATE_ID`]. See
+ /// its docs for more details.
+ ///
+ /// [`CLOSED_CHANNEL_UPDATE_ID`]: constant.CLOSED_CHANNEL_UPDATE_ID.html
pub update_id: u64,
}
+/// If:
+/// (1) a channel has been force closed and
+/// (2) we receive a preimage from a forward link that allows us to spend an HTLC output on
+/// this channel's (the backward link's) broadcasted commitment transaction
+/// then we allow the `ChannelManager` to send a `ChannelMonitorUpdate` with this update ID,
+/// with the update providing said payment preimage. No other update types are allowed after
+/// force-close.
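+///
+/// A minimal sketch of the resulting ordering rule, as enforced in `update_monitor` (the
+/// helper name here is purely illustrative):
+///
+/// ```ignore
+/// fn update_id_in_order(latest_update_id: u64, next_update_id: u64) -> bool {
+///     next_update_id == CLOSED_CHANNEL_UPDATE_ID || next_update_id == latest_update_id + 1
+/// }
+/// ```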
+pub const CLOSED_CHANNEL_UPDATE_ID: u64 = std::u64::MAX;
+
impl Writeable for ChannelMonitorUpdate {
fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
self.update_id.write(w)?;
/// Provides a payment_hash->payment_preimage mapping. Will be automatically pruned when all
/// commitment_tx_infos which contain the payment hash have been revoked.
- pub(crate) fn provide_payment_preimage(&mut self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage) {
+ pub(crate) fn provide_payment_preimage<B: Deref, F: Deref, L: Deref>(&mut self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage, broadcaster: &B, fee_estimator: &F, logger: &L)
+ where B::Target: BroadcasterInterface,
+ F::Target: FeeEstimator,
+ L::Target: Logger,
+ {
self.payment_preimages.insert(payment_hash.clone(), payment_preimage.clone());
+
+ // If the channel is force-closed, try to claim any HTLC outputs this preimage unlocks.
+ // First check whether a counterparty commitment transaction has been broadcast; note that
+ // both the current and the previous commitment transactions may be confirmable, since
+ // revocation is not atomic:
+ macro_rules! claim_htlcs {
+ ($commitment_number: expr, $txid: expr) => {
+ let htlc_claim_reqs = self.get_counterparty_htlc_output_claim_reqs($commitment_number, $txid, None);
+ self.onchain_tx_handler.update_claims_view(&Vec::new(), htlc_claim_reqs, None, broadcaster, fee_estimator, logger);
+ }
+ }
+ if let Some(txid) = self.current_counterparty_commitment_txid {
+ if let Some(commitment_number) = self.counterparty_commitment_txn_on_chain.get(&txid) {
+ claim_htlcs!(*commitment_number, txid);
+ return;
+ }
+ }
+ if let Some(txid) = self.prev_counterparty_commitment_txid {
+ if let Some(commitment_number) = self.counterparty_commitment_txn_on_chain.get(&txid) {
+ claim_htlcs!(*commitment_number, txid);
+ return;
+ }
+ }
+
+ // Then if a holder commitment transaction has been seen on-chain, broadcast transactions
+ // claiming the HTLC outputs from each of the holder commitment transactions.
+ // Note that we can't just use `self.holder_tx_signed`, because that only covers the case where
+ // *we* sign a holder commitment transaction, not when e.g. a watchtower broadcasts one of our
+ // holder commitment transactions.
+ if self.broadcasted_holder_revokable_script.is_some() {
+ let (claim_reqs, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx);
+ self.onchain_tx_handler.update_claims_view(&Vec::new(), claim_reqs, None, broadcaster, fee_estimator, logger);
+ if let Some(ref tx) = self.prev_holder_signed_commitment_tx {
+ let (claim_reqs, _) = self.get_broadcasted_holder_claims(&tx);
+ self.onchain_tx_handler.update_claims_view(&Vec::new(), claim_reqs, None, broadcaster, fee_estimator, logger);
+ }
+ }
}
pub(crate) fn broadcast_latest_holder_commitment_txn<B: Deref, L: Deref>(&mut self, broadcaster: &B, logger: &L)
/// itself.
///
/// panics if the given update is not the next update by update_id.
- pub fn update_monitor<B: Deref, L: Deref>(&mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, logger: &L) -> Result<(), MonitorUpdateError>
- where B::Target: BroadcasterInterface,
- L::Target: Logger,
+ pub fn update_monitor<B: Deref, F: Deref, L: Deref>(&mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &L) -> Result<(), MonitorUpdateError>
+ where B::Target: BroadcasterInterface,
+ F::Target: FeeEstimator,
+ L::Target: Logger,
{
- if self.latest_update_id + 1 != updates.update_id {
+ // ChannelMonitor updates may be applied after force close if we receive a
+ // preimage for a broadcasted commitment transaction HTLC output that we'd
+ // like to claim on-chain. If this is the case, we no longer have guaranteed
+ // access to the monitor's update ID, so we use a sentinel value instead.
+ if updates.update_id == CLOSED_CHANNEL_UPDATE_ID {
+ // Assert the update count first so an empty update fails with a clear message rather
+ // than an index panic.
+ assert_eq!(updates.updates.len(), 1);
+ match updates.updates[0] {
+ ChannelMonitorUpdateStep::PaymentPreimage { .. } => {},
+ _ => panic!("Attempted to apply post-force-close ChannelMonitorUpdate that wasn't providing a payment preimage"),
+ }
+ } else if self.latest_update_id + 1 != updates.update_id {
panic!("Attempted to apply ChannelMonitorUpdates out of order, check the update_id before passing an update to update_monitor!");
}
for update in updates.updates.iter() {
match update {
ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { commitment_tx, htlc_outputs } => {
+ log_trace!(logger, "Updating ChannelMonitor with latest holder commitment transaction info");
if self.lockdown_from_offchain { panic!(); }
self.provide_latest_holder_commitment_tx_info(commitment_tx.clone(), htlc_outputs.clone())?
},
- ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { unsigned_commitment_tx, htlc_outputs, commitment_number, their_revocation_point } =>
- self.provide_latest_counterparty_commitment_tx_info(&unsigned_commitment_tx, htlc_outputs.clone(), *commitment_number, *their_revocation_point, logger),
- ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage } =>
- self.provide_payment_preimage(&PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner()), &payment_preimage),
- ChannelMonitorUpdateStep::CommitmentSecret { idx, secret } =>
- self.provide_secret(*idx, *secret)?,
+ ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { unsigned_commitment_tx, htlc_outputs, commitment_number, their_revocation_point } => {
+ log_trace!(logger, "Updating ChannelMonitor with latest counterparty commitment transaction info");
+ self.provide_latest_counterparty_commitment_tx_info(&unsigned_commitment_tx, htlc_outputs.clone(), *commitment_number, *their_revocation_point, logger)
+ },
+ ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage } => {
+ log_trace!(logger, "Updating ChannelMonitor with payment preimage");
+ self.provide_payment_preimage(&PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner()), &payment_preimage, broadcaster, fee_estimator, logger)
+ },
+ ChannelMonitorUpdateStep::CommitmentSecret { idx, secret } => {
+ log_trace!(logger, "Updating ChannelMonitor with commitment secret");
+ self.provide_secret(*idx, *secret)?
+ },
ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } => {
+ log_trace!(logger, "Updating ChannelMonitor: channel force closed, should broadcast: {}", should_broadcast);
self.lockdown_from_offchain = true;
if *should_broadcast {
self.broadcast_latest_holder_commitment_txn(broadcaster, logger);
check_htlc_fails!(txid, "previous", 'prev_loop);
}
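+ // Gather claim requests for each HTLC output on this confirmed counterparty commitment tx.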
+ let htlc_claim_reqs = self.get_counterparty_htlc_output_claim_reqs(commitment_number, commitment_txid, Some(tx));
+ claimable_outpoints.extend(htlc_claim_reqs);
+ }
+ (claimable_outpoints, (commitment_txid, watch_outputs))
+ }
+
+ fn get_counterparty_htlc_output_claim_reqs(&self, commitment_number: u64, commitment_txid: Txid, tx: Option<&Transaction>) -> Vec<ClaimRequest> {
+ let mut claims = Vec::new();
+ if let Some(htlc_outputs) = self.counterparty_claimable_outpoints.get(&commitment_txid) {
if let Some(revocation_points) = self.their_cur_revocation_points {
let revocation_point_option =
+ // If the counterparty commitment tx is the latest valid state, use their latest
+ // per-commitment point
if revocation_points.0 == commitment_number { Some(&revocation_points.1) }
else if let Some(point) = revocation_points.2.as_ref() {
+ // If the counterparty commitment tx is the state previous to the latest valid state, use
+ // their previous per-commitment point (non-atomicity of revocation means it's valid for
+ // them to temporarily have two valid commitment txns from our viewpoint)
if revocation_points.0 == commitment_number + 1 { Some(point) } else { None }
} else { None };
if let Some(revocation_point) = revocation_point_option {
- self.counterparty_payment_script = {
- // Note that the Network here is ignored as we immediately drop the address for the
- // script_pubkey version
- let payment_hash160 = WPubkeyHash::hash(&self.keys.pubkeys().payment_point.serialize());
- Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&payment_hash160[..]).into_script()
- };
-
- // Then, try to find htlc outputs
- for (_, &(ref htlc, _)) in per_commitment_data.iter().enumerate() {
+ for (_, &(ref htlc, _)) in htlc_outputs.iter().enumerate() {
if let Some(transaction_output_index) = htlc.transaction_output_index {
- if transaction_output_index as usize >= tx.output.len() ||
- tx.output[transaction_output_index as usize].value != htlc.amount_msat / 1000 {
- return (claimable_outpoints, (commitment_txid, watch_outputs)); // Corrupted per_commitment_data, fuck this user
+ if let Some(transaction) = tx {
+ if transaction_output_index as usize >= transaction.output.len() ||
+ transaction.output[transaction_output_index as usize].value != htlc.amount_msat / 1000 {
+ return claims; // Corrupted per_commitment_data, give up on claiming this commitment's HTLCs
+ }
}
- let preimage = if htlc.offered { if let Some(p) = self.payment_preimages.get(&htlc.payment_hash) { Some(*p) } else { None } } else { None };
+ let preimage = if htlc.offered {
+ self.payment_preimages.get(&htlc.payment_hash).cloned()
+ } else { None };
let aggregable = if !htlc.offered { false } else { true };
if preimage.is_some() || !htlc.offered {
let witness_data = InputMaterial::CounterpartyHTLC { per_commitment_point: *revocation_point, counterparty_delayed_payment_base_key: self.counterparty_tx_cache.counterparty_delayed_payment_base_key, counterparty_htlc_base_key: self.counterparty_tx_cache.counterparty_htlc_base_key, preimage, htlc: htlc.clone() };
- claimable_outpoints.push(ClaimRequest { absolute_timelock: htlc.cltv_expiry, aggregable, outpoint: BitcoinOutPoint { txid: commitment_txid, vout: transaction_output_index }, witness_data });
+ claims.push(ClaimRequest { absolute_timelock: htlc.cltv_expiry, aggregable, outpoint: BitcoinOutPoint { txid: commitment_txid, vout: transaction_output_index }, witness_data });
}
}
}
}
}
}
- (claimable_outpoints, (commitment_txid, watch_outputs))
+ claims
}
/// Attempts to claim a counterparty HTLC-Success/HTLC-Timeout's outputs using the revocation key
(claimable_outpoints, Some((htlc_txid, outputs)))
}
- fn broadcast_by_holder_state(&self, commitment_tx: &Transaction, holder_tx: &HolderSignedTx) -> (Vec<ClaimRequest>, Vec<(u32, TxOut)>, Option<(Script, PublicKey, PublicKey)>) {
+ // Returns (1) `ClaimRequest`s that can be given to the OnchainTxHandler so that it can
+ // broadcast transactions claiming the HTLC outputs of the holder commitment transaction, and
+ // (2) the holder revokable script, so we can detect whether a holder commitment transaction
+ // has been seen on-chain.
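+ // Note that the claim requests are built from the HolderSignedTx data alone, without the
+ // broadcast transaction itself; this is what lets provide_payment_preimage() re-attempt these
+ // claims after force-close, when no new block data is available. The watch outputs, which do
+ // need the actual on-chain transaction, are computed separately in
+ // get_broadcasted_holder_watch_outputs().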
+ fn get_broadcasted_holder_claims(&self, holder_tx: &HolderSignedTx) -> (Vec<ClaimRequest>, Option<(Script, PublicKey, PublicKey)>) {
let mut claim_requests = Vec::with_capacity(holder_tx.htlc_outputs.len());
- let mut watch_outputs = Vec::with_capacity(holder_tx.htlc_outputs.len());
let redeemscript = chan_utils::get_revokeable_redeemscript(&holder_tx.revocation_key, self.on_holder_tx_csv, &holder_tx.delayed_payment_key);
let broadcasted_holder_revokable_script = Some((redeemscript.to_v0_p2wsh(), holder_tx.per_commitment_point.clone(), holder_tx.revocation_key.clone()));
} else { None },
amount: htlc.amount_msat,
}});
- watch_outputs.push((transaction_output_index, commitment_tx.output[transaction_output_index as usize].clone()));
}
}
- (claim_requests, watch_outputs, broadcasted_holder_revokable_script)
+ (claim_requests, broadcasted_holder_revokable_script)
+ }
+
+ // Returns the holder HTLC outputs to watch, so we can react if they are spent on-chain.
+ fn get_broadcasted_holder_watch_outputs(&self, holder_tx: &HolderSignedTx, commitment_tx: &Transaction) -> Vec<(u32, TxOut)> {
+ let mut watch_outputs = Vec::with_capacity(holder_tx.htlc_outputs.len());
+ for &(ref htlc, _, _) in holder_tx.htlc_outputs.iter() {
+ if let Some(transaction_output_index) = htlc.transaction_output_index {
+ watch_outputs.push((transaction_output_index, commitment_tx.output[transaction_output_index as usize].clone()));
+ }
+ }
+ watch_outputs
}
/// Attempts to claim any claimable HTLCs in a commitment transaction which was not (yet)
}
macro_rules! append_onchain_update {
- ($updates: expr) => {
+ ($updates: expr, $to_watch: expr) => {
claim_requests = $updates.0;
- watch_outputs.append(&mut $updates.1);
- self.broadcasted_holder_revokable_script = $updates.2;
+ self.broadcasted_holder_revokable_script = $updates.1;
+ watch_outputs.append(&mut $to_watch);
}
}
if self.current_holder_commitment_tx.txid == commitment_txid {
is_holder_tx = true;
log_trace!(logger, "Got latest holder commitment tx broadcast, searching for available HTLCs to claim");
- let mut res = self.broadcast_by_holder_state(tx, &self.current_holder_commitment_tx);
- append_onchain_update!(res);
+ let res = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx);
+ let mut to_watch = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, tx);
+ append_onchain_update!(res, to_watch);
} else if let &Some(ref holder_tx) = &self.prev_holder_signed_commitment_tx {
if holder_tx.txid == commitment_txid {
is_holder_tx = true;
log_trace!(logger, "Got previous holder commitment tx broadcast, searching for available HTLCs to claim");
- let mut res = self.broadcast_by_holder_state(tx, holder_tx);
- append_onchain_update!(res);
+ let res = self.get_broadcasted_holder_claims(holder_tx);
+ let mut to_watch = self.get_broadcasted_holder_watch_outputs(holder_tx, tx);
+ append_onchain_update!(res, to_watch);
}
}
self.pending_monitor_events.push(MonitorEvent::CommitmentTxBroadcasted(self.funding_info.0));
if let Some(commitment_tx) = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript) {
self.holder_tx_signed = true;
- let (mut new_outpoints, new_outputs, _) = self.broadcast_by_holder_state(&commitment_tx, &self.current_holder_commitment_tx);
+ let (mut new_outpoints, _) = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx);
+ let new_outputs = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, &commitment_tx);
if !new_outputs.is_empty() {
watch_outputs.push((self.current_holder_commitment_tx.txid.clone(), new_outputs));
}
}
}
- self.onchain_tx_handler.block_connected(&txn_matched, claimable_outpoints, height, &*broadcaster, &*fee_estimator, &*logger);
+ self.onchain_tx_handler.update_claims_view(&txn_matched, claimable_outpoints, Some(height), &&*broadcaster, &&*fee_estimator, &&*logger);
self.last_block_hash = block_hash;
// Determine new outputs to watch by comparing against previously known outputs to watch,
use ln::onchaintx::{OnchainTxHandler, InputDescriptors};
use ln::chan_utils;
use ln::chan_utils::{HTLCOutputInCommitment, HolderCommitmentTransaction};
- use util::test_utils::TestLogger;
+ use util::test_utils::{TestLogger, TestBroadcaster, TestFeeEstimator};
use bitcoin::secp256k1::key::{SecretKey,PublicKey};
use bitcoin::secp256k1::Secp256k1;
- use std::sync::Arc;
+ use std::sync::{Arc, Mutex};
use chain::keysinterface::InMemoryChannelKeys;
#[test]
fn test_prune_preimages() {
let secp_ctx = Secp256k1::new();
let logger = Arc::new(TestLogger::new());
+ let broadcaster = Arc::new(TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())});
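+ // 253 sats per 1000 weight units is ~1 sat/vbyte, a common minimum-relay-feerate floor.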
+ let fee_estimator = Arc::new(TestFeeEstimator { sat_per_kw: 253 });
let dummy_key = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let dummy_tx = Transaction { version: 0, lock_time: 0, input: Vec::new(), output: Vec::new() };
monitor.provide_latest_counterparty_commitment_tx_info(&dummy_tx, preimages_slice_to_htlc_outputs!(preimages[17..20]), 281474976710653, dummy_key, &logger);
monitor.provide_latest_counterparty_commitment_tx_info(&dummy_tx, preimages_slice_to_htlc_outputs!(preimages[18..20]), 281474976710652, dummy_key, &logger);
for &(ref preimage, ref hash) in preimages.iter() {
- monitor.provide_payment_preimage(hash, preimage);
+ monitor.provide_payment_preimage(hash, preimage, &broadcaster, &fee_estimator, &logger);
}
// Now provide a secret, pruning preimages 10-15
use chain;
use chain::Watch;
use chain::chaininterface::{BroadcasterInterface, FeeEstimator};
-use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent};
+use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, ChannelMonitorUpdateErr, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
use chain::transaction::{OutPoint, TransactionData};
use ln::channel::{Channel, ChannelError};
use ln::features::{InitFeatures, NodeFeatures};
pub(super) enum HTLCForwardInfo {
AddHTLC {
+ forward_info: PendingHTLCInfo,
+
+ // These fields are produced in `forward_htlcs()` and consumed in
+ // `process_pending_htlc_forwards()` for constructing the
+ // `HTLCSource::PreviousHopData` for failed and forwarded
+ // HTLCs.
prev_short_channel_id: u64,
prev_htlc_id: u64,
- forward_info: PendingHTLCInfo,
+ prev_funding_outpoint: OutPoint,
},
FailHTLC {
htlc_id: u64,
short_channel_id: u64,
htlc_id: u64,
incoming_packet_shared_secret: [u8; 32],
+
+ // This field is consumed by `claim_funds_from_hop()` when updating a force-closed backwards
+ // channel with a preimage provided by the forward channel.
+ outpoint: OutPoint,
}
struct ClaimableHTLC {
failed_forwards.reserve(pending_forwards.len());
for forward_info in pending_forwards.drain(..) {
match forward_info {
- HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info } => {
+ HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info,
+ prev_funding_outpoint } => {
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
+ outpoint: prev_funding_outpoint,
htlc_id: prev_htlc_id,
incoming_packet_shared_secret: forward_info.incoming_shared_secret,
});
HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
routing: PendingHTLCRouting::Forward {
onion_packet, ..
- }, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value }, } => {
+ }, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value },
+ prev_funding_outpoint } => {
log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", log_bytes!(payment_hash.0), prev_short_channel_id, short_chan_id);
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
+ outpoint: prev_funding_outpoint,
htlc_id: prev_htlc_id,
incoming_packet_shared_secret: incoming_shared_secret,
});
match forward_info {
HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
routing: PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry },
- incoming_shared_secret, payment_hash, amt_to_forward, .. }, } => {
+ incoming_shared_secret, payment_hash, amt_to_forward, .. },
+ prev_funding_outpoint } => {
let prev_hop = HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
+ outpoint: prev_funding_outpoint,
htlc_id: prev_htlc_id,
incoming_packet_shared_secret: incoming_shared_secret,
};
);
failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: htlc.prev_hop.short_channel_id,
+ outpoint: prev_funding_outpoint,
htlc_id: htlc.prev_hop.htlc_id,
incoming_packet_shared_secret: htlc.prev_hop.incoming_packet_shared_secret,
}), payment_hash,
}
}
},
- HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret }) => {
+ HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret, .. }) => {
let err_packet = match onion_error {
HTLCFailReason::Reason { failure_code, data } => {
log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with code {}", log_bytes!(payment_hash.0), failure_code);
});
},
HTLCSource::PreviousHopData(hop_data) => {
+ let prev_outpoint = hop_data.outpoint;
if let Err((counterparty_node_id, err)) = match self.claim_funds_from_hop(&mut channel_state_lock, hop_data, payment_preimage) {
Ok(()) => Ok(()),
Err(None) => {
- // TODO: There is probably a channel monitor somewhere that needs to
- // learn the preimage as the channel already hit the chain and that's
- // why it's missing.
+ let preimage_update = ChannelMonitorUpdate {
+ update_id: CLOSED_CHANNEL_UPDATE_ID,
+ updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
+ payment_preimage: payment_preimage.clone(),
+ }],
+ };
+ // We update the ChannelMonitor on the backward link, after
+ // receiving an offchain preimage event from the forward link (the
+ // event being update_fulfill_htlc).
+ if let Err(e) = self.chain_monitor.update_channel(prev_outpoint, preimage_update) {
+ log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+ payment_preimage, e);
+ }
Ok(())
},
Err(Some(res)) => Err(res),
let (raa, commitment_update, order, pending_forwards, mut pending_failures, needs_broadcast_safe, funding_locked) = channel.monitor_updating_restored(&self.logger);
if !pending_forwards.is_empty() {
- htlc_forwards.push((channel.get_short_channel_id().expect("We can't have pending forwards before funding confirmation"), pending_forwards));
+ htlc_forwards.push((channel.get_short_channel_id().expect("We can't have pending forwards before funding confirmation"), funding_txo.clone(), pending_forwards));
}
htlc_failures.append(&mut pending_failures);
}
#[inline]
- fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, Vec<(PendingHTLCInfo, u64)>)]) {
- for &mut (prev_short_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
+ fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, Vec<(PendingHTLCInfo, u64)>)]) {
+ for &mut (prev_short_channel_id, prev_funding_outpoint, ref mut pending_forwards) in per_source_pending_forwards {
let mut forward_event = None;
if !pending_forwards.is_empty() {
let mut channel_state = self.channel_state.lock().unwrap();
PendingHTLCRouting::Receive { .. } => 0,
}) {
hash_map::Entry::Occupied(mut entry) => {
- entry.get_mut().push(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info });
+ entry.get_mut().push(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_funding_outpoint,
+ prev_htlc_id, forward_info });
},
hash_map::Entry::Vacant(entry) => {
- entry.insert(vec!(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info }));
+ entry.insert(vec!(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_funding_outpoint,
+ prev_htlc_id, forward_info }));
}
}
}
msg,
});
}
- break Ok((pending_forwards, pending_failures, chan.get().get_short_channel_id().expect("RAA should only work on a short-id-available channel")))
+ break Ok((pending_forwards, pending_failures, chan.get().get_short_channel_id().expect("RAA should only work on a short-id-available channel"), chan.get().get_funding_txo().unwrap()))
},
hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
};
self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id);
match res {
- Ok((pending_forwards, mut pending_failures, short_channel_id)) => {
+ Ok((pending_forwards, mut pending_failures, short_channel_id, channel_outpoint)) => {
for failure in pending_failures.drain(..) {
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
}
- self.forward_htlcs(&mut [(short_channel_id, pending_forwards)]);
+ self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, pending_forwards)]);
Ok(())
},
Err(e) => Err(e)
impl_writeable!(HTLCPreviousHopData, 0, {
short_channel_id,
+ outpoint,
htlc_id,
incoming_packet_shared_secret
});
impl Writeable for HTLCForwardInfo {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
match self {
- &HTLCForwardInfo::AddHTLC { ref prev_short_channel_id, ref prev_htlc_id, ref forward_info } => {
+ &HTLCForwardInfo::AddHTLC { ref prev_short_channel_id, ref prev_funding_outpoint, ref prev_htlc_id, ref forward_info } => {
0u8.write(writer)?;
prev_short_channel_id.write(writer)?;
+ prev_funding_outpoint.write(writer)?;
prev_htlc_id.write(writer)?;
forward_info.write(writer)?;
},
match <u8 as Readable>::read(reader)? {
0 => Ok(HTLCForwardInfo::AddHTLC {
prev_short_channel_id: Readable::read(reader)?,
+ prev_funding_outpoint: Readable::read(reader)?,
prev_htlc_id: Readable::read(reader)?,
forward_info: Readable::read(reader)?,
}),
{
let mut monitors = nodes[2].chain_monitor.chain_monitor.monitors.lock().unwrap();
monitors.get_mut(&OutPoint{ txid: Txid::from_slice(&payment_event.commitment_msg.channel_id[..]).unwrap(), index: 0 }).unwrap()
- .provide_payment_preimage(&our_payment_hash, &our_payment_preimage);
+ .provide_payment_preimage(&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &node_cfgs[2].fee_estimator, &&logger);
}
connect_block(&nodes[2], &block, 1);
let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1, 201, true, header_201.block_hash());
expect_payment_failed!(nodes[0], our_payment_hash, true);
}
+
+fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain_before_fulfill: bool) {
+ // If we route an HTLC, then learn the HTLC's preimage after the upstream channel has been
+ // force-closed, we must claim that HTLC on-chain. (Given an HTLC forwarded from Alice --> Bob -->
+ // Carol, Alice would be the upstream node, and Carol the downstream.)
+ //
+ // Steps of the test:
+ // 1) Alice sends a HTLC to Carol through Bob.
+ // 2) Carol doesn't settle the HTLC.
+ // 3) If broadcast_alice is true, Alice force-closes her channel with Bob. Else Bob force closes.
+ // Steps 4 and 5 may be reordered depending on go_onchain_before_fulfill.
+ // 4) Bob sees Alice's commitment transaction on-chain (or vice versa). An offered output is
+ // present but can't be claimed yet, as Bob doesn't know the preimage.
+ // 5) Carol releases the preimage to Bob off-chain.
+ // 6) Bob claims the offered output on the broadcasted commitment.
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+ let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ // Create some initial channels
+ let chan_ab = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+
+ // Steps (1) and (2):
+ // Send an HTLC Alice --> Bob --> Carol, but Carol doesn't settle the HTLC back.
+ let (payment_preimage, _payment_hash) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3_000_000);
+
+ // Check that Alice's commitment transaction now contains an output for this HTLC.
+ let alice_txn = get_local_commitment_txn!(nodes[0], chan_ab.2);
+ check_spends!(alice_txn[0], chan_ab.3);
+ assert_eq!(alice_txn[0].output.len(), 2);
+ check_spends!(alice_txn[1], alice_txn[0]); // 2nd transaction is a non-final HTLC-timeout
+ assert_eq!(alice_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
+ assert_eq!(alice_txn.len(), 2);
+
+ // Steps (3) and (4):
+ // If `go_onchain_before_fulfill`, broadcast the relevant commitment transaction and check that Bob
+ // responds by (1) broadcasting a channel update and (2) adding a new ChannelMonitor.
+ let mut force_closing_node = 0; // Alice force-closes
+ if !broadcast_alice { force_closing_node = 1; } // Bob force-closes
+ nodes[force_closing_node].node.force_close_channel(&chan_ab.2);
+ check_closed_broadcast!(nodes[force_closing_node], false);
+ check_added_monitors!(nodes[force_closing_node], 1);
+ if go_onchain_before_fulfill {
+ let txn_to_broadcast = match broadcast_alice {
+ true => alice_txn.clone(),
+ false => get_local_commitment_txn!(nodes[1], chan_ab.2)
+ };
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
+ connect_block(&nodes[1], &Block { header, txdata: vec![txn_to_broadcast[0].clone()]}, 1);
+ let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ if broadcast_alice {
+ check_closed_broadcast!(nodes[1], false);
+ check_added_monitors!(nodes[1], 1);
+ }
+ assert_eq!(bob_txn.len(), 1);
+ check_spends!(bob_txn[0], chan_ab.3);
+ }
+
+ // Step (5):
+ // Carol then claims the funds and sends an update_fulfill message to Bob, and they go through the
+ // process of removing the HTLC from their commitment transactions.
+ assert!(nodes[2].node.claim_funds(payment_preimage, &None, 3_000_000));
+ check_added_monitors!(nodes[2], 1);
+ let carol_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+ assert!(carol_updates.update_add_htlcs.is_empty());
+ assert!(carol_updates.update_fail_htlcs.is_empty());
+ assert!(carol_updates.update_fail_malformed_htlcs.is_empty());
+ assert!(carol_updates.update_fee.is_none());
+ assert_eq!(carol_updates.update_fulfill_htlcs.len(), 1);
+
+ nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &carol_updates.update_fulfill_htlcs[0]);
+ // If Alice broadcasted but Bob doesn't know yet, here he prepares to tell her about the preimage.
+ if !go_onchain_before_fulfill && broadcast_alice {
+ let events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::UpdateHTLCs { ref node_id, .. } => {
+ assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+ },
+ _ => panic!("Unexpected event"),
+ };
+ }
+ nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &carol_updates.commitment_signed);
+ // One monitor update for the preimage to update the Bob<->Alice channel, and one monitor
+ // update for Carol<->Bob's updated commitment transaction info.
+ check_added_monitors!(nodes[1], 2);
+
+ let events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 2);
+ let bob_revocation = match events[0] {
+ MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
+ assert_eq!(*node_id, nodes[2].node.get_our_node_id());
+ (*msg).clone()
+ },
+ _ => panic!("Unexpected event"),
+ };
+ let bob_updates = match events[1] {
+ MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
+ assert_eq!(*node_id, nodes[2].node.get_our_node_id());
+ (*updates).clone()
+ },
+ _ => panic!("Unexpected event"),
+ };
+
+ nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bob_revocation);
+ check_added_monitors!(nodes[2], 1);
+ nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bob_updates.commitment_signed);
+ check_added_monitors!(nodes[2], 1);
+
+ let events = nodes[2].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let carol_revocation = match events[0] {
+ MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
+ assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+ (*msg).clone()
+ },
+ _ => panic!("Unexpected event"),
+ };
+ nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &carol_revocation);
+ check_added_monitors!(nodes[1], 1);
+
+ // If this test requires the force-closed channel to not be on-chain until after the fulfill,
+ // here's where we put said channel's commitment tx on-chain.
+ let mut txn_to_broadcast = alice_txn.clone();
+ if !broadcast_alice { txn_to_broadcast = get_local_commitment_txn!(nodes[1], chan_ab.2); }
+ if !go_onchain_before_fulfill {
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
+ connect_block(&nodes[1], &Block { header, txdata: vec![txn_to_broadcast[0].clone()]}, 1);
+ // If Bob was the one to force-close, he will have already passed these checks earlier.
+ if broadcast_alice {
+ check_closed_broadcast!(nodes[1], false);
+ check_added_monitors!(nodes[1], 1);
+ }
+ let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ if broadcast_alice {
+ // In `connect_block()`, the ChainMonitor and ChannelManager are separately notified about a
+ // new block being connected. The ChannelManager being notified triggers a monitor update,
+ // which triggers broadcasting our commitment tx and an HTLC-claiming tx. The ChainMonitor
+ // being notified triggers the HTLC-claiming tx redundantly, resulting in 3 total txs being
+ // broadcasted.
+ assert_eq!(bob_txn.len(), 3);
+ check_spends!(bob_txn[1], chan_ab.3);
+ } else {
+ assert_eq!(bob_txn.len(), 2);
+ check_spends!(bob_txn[0], chan_ab.3);
+ }
+ }
+
+ // Step (6):
+ // Finally, check that Bob broadcasted a preimage-claiming transaction for the HTLC output on the
+ // broadcasted commitment transaction.
+ {
+ let bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+ if go_onchain_before_fulfill {
+ // Bob should now have an extra broadcasted tx, for the preimage-claiming transaction.
+ assert_eq!(bob_txn.len(), 2);
+ }
+ let script_weight = match broadcast_alice {
+ true => OFFERED_HTLC_SCRIPT_WEIGHT,
+ false => ACCEPTED_HTLC_SCRIPT_WEIGHT
+ };
+ // If Alice force-closed and Bob didn't receive her commitment transaction until after he
+ // received Carol's fulfill, he broadcasts the HTLC-output-claiming transaction first. Else if
+ // Bob force closed or if he found out about Alice's commitment tx before receiving Carol's
+ // fulfill, then he broadcasts the HTLC-output-claiming transaction second.
+ if broadcast_alice && !go_onchain_before_fulfill {
+ check_spends!(bob_txn[0], txn_to_broadcast[0]);
+ assert_eq!(bob_txn[0].input[0].witness.last().unwrap().len(), script_weight);
+ } else {
+ check_spends!(bob_txn[1], txn_to_broadcast[0]);
+ assert_eq!(bob_txn[1].input[0].witness.last().unwrap().len(), script_weight);
+ }
+ }
+}
+
+#[test]
+fn test_onchain_htlc_settlement_after_close() {
+ do_test_onchain_htlc_settlement_after_close(true, true);
+ do_test_onchain_htlc_settlement_after_close(false, true); // Technically redundant, but may as well
+ do_test_onchain_htlc_settlement_after_close(true, false);
+ do_test_onchain_htlc_settlement_after_close(false, false);
+}
onchain_events_waiting_threshold_conf: HashMap<u32, Vec<OnchainEvent>>,
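+ // The most recent block height seen by this handler; used as the fallback height when
+ // update_claims_view() is driven by a post-force-close preimage claim rather than a newly
+ // connected block.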
+ latest_height: u32,
+
secp_ctx: Secp256k1<secp256k1::All>,
}
}
}
}
+ self.latest_height.write(writer)?;
Ok(())
}
}
}
onchain_events_waiting_threshold_conf.insert(height_target, events);
}
+ let latest_height = Readable::read(reader)?;
Ok(OnchainTxHandler {
destination_script,
claimable_outpoints,
pending_claim_requests,
onchain_events_waiting_threshold_conf,
+ latest_height,
secp_ctx: Secp256k1::new(),
})
}
pending_claim_requests: HashMap::new(),
claimable_outpoints: HashMap::new(),
onchain_events_waiting_threshold_conf: HashMap::new(),
+ latest_height: 0,
secp_ctx: Secp256k1::new(),
}
/// The Lightning security model (i.e. being able to redeem/timeout HTLCs or penalize a counterparty on-chain) relies on the assumption that claim transactions get confirmed before timelock expiration
/// (CSV or CLTV, depending on the case). During high-fee spikes a claim tx may get stuck in the mempool, so you need to bump its feerate quickly using Replace-By-Fee or Child-Pays-For-Parent.
- fn generate_claim_tx<F: Deref, L: Deref>(&mut self, height: u32, cached_claim_datas: &ClaimTxBumpMaterial, fee_estimator: F, logger: L) -> Option<(Option<u32>, u32, Transaction)>
+ fn generate_claim_tx<F: Deref, L: Deref>(&mut self, height: u32, cached_claim_datas: &ClaimTxBumpMaterial, fee_estimator: &F, logger: &L) -> Option<(Option<u32>, u32, Transaction)>
where F::Target: FeeEstimator,
L::Target: Logger,
{
None
}
- pub(crate) fn block_connected<B: Deref, F: Deref, L: Deref>(&mut self, txn_matched: &[&Transaction], claimable_outpoints: Vec<ClaimRequest>, height: u32, broadcaster: B, fee_estimator: F, logger: L)
+ /// Called upon channelmonitor.block_connected(..), or upon provision of a preimage on the
+ /// forward link for this channel, to process new relevant on-chain transactions and/or new
+ /// claim requests. Formerly this was named `block_connected`, but it is now also used for
+ /// claiming an HTLC output if we receive a preimage after force-close.
+ pub(crate) fn update_claims_view<B: Deref, F: Deref, L: Deref>(&mut self, txn_matched: &[&Transaction], claimable_outpoints: Vec<ClaimRequest>, latest_height: Option<u32>, broadcaster: &B, fee_estimator: &F, logger: &L)
where B::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
- log_trace!(logger, "Block at height {} connected with {} claim requests", height, claimable_outpoints.len());
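+ // latest_height is None when this call is driven by a post-force-close preimage claim
+ // rather than a newly connected block; fall back to the last block height we processed.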
+ let height = match latest_height {
+ Some(h) => h,
+ None => self.latest_height,
+ };
+ log_trace!(logger, "Updating claims view at height {} with {} matched transactions and {} claim requests", height, txn_matched.len(), claimable_outpoints.len());
let mut new_claims = Vec::new();
let mut aggregated_claim = HashMap::new();
let mut aggregated_soonest = ::std::u32::MAX;
}
}
for (_, claim_material) in bump_candidates.iter_mut() {
- if let Some((new_timer, new_feerate, bump_tx)) = self.generate_claim_tx(height, &claim_material, &*fee_estimator, &*logger) {
+ if let Some((new_timer, new_feerate, bump_tx)) = self.generate_claim_tx(height, &claim_material, &&*fee_estimator, &&*logger) {
claim_material.height_timer = new_timer;
claim_material.feerate_previous = new_feerate;
broadcaster.broadcast_transaction(&bump_tx);