//!
//! ChannelMonitor objects are generated by ChannelManager in response to relevant
//! messages/actions, and MUST be persisted to disk (and, preferably, remotely) before progress can
-//! be made in responding to certain messages, see ManyChannelMonitor for more.
+//! be made in responding to certain messages, see [`chain::Watch`] for more.
//!
//! Note that ChannelMonitors are an important part of the lightning trust model and a copy of the
//! latest ChannelMonitor must always be actively monitoring for chain updates (and no out-of-date
//! ChannelMonitors should do so). Thus, if you're building rust-lightning into an HSM or other
//! security-domain-separated system design, you should consider having multiple paths for
//! ChannelMonitors to get out of the HSM and onto monitoring devices.
+//!
+//! [`chain::Watch`]: ../../chain/trait.Watch.html
use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::transaction::{TxOut,Transaction};
use ln::msgs::DecodeError;
use ln::chan_utils;
-use ln::chan_utils::{CounterpartyCommitmentSecrets, HTLCOutputInCommitment, LocalCommitmentTransaction, HTLCType};
+use ln::chan_utils::{CounterpartyCommitmentSecrets, HTLCOutputInCommitment, HolderCommitmentTransaction, HTLCType};
use ln::channelmanager::{HTLCSource, PaymentPreimage, PaymentHash};
use ln::onchaintx::{OnchainTxHandler, InputDescriptors};
-use chain::chaininterface::{ChainListener, ChainWatchInterface, BroadcasterInterface, FeeEstimator};
+use chain;
+use chain::chaininterface::{ChainListener, ChainWatchedUtil, BroadcasterInterface, FeeEstimator};
use chain::transaction::OutPoint;
use chain::keysinterface::{SpendableOutputDescriptor, ChannelKeys};
use util::logger::Logger;
use util::{byte_utils, events};
use util::events::Event;
-use std::collections::{HashMap, hash_map};
+use std::collections::{HashMap, HashSet, hash_map};
use std::sync::Mutex;
use std::{hash,cmp, mem};
use std::ops::Deref;
TemporaryFailure,
/// Used to indicate no further channel monitor updates will be allowed (eg we've moved on to a
/// different watchtower and cannot update with all watchtowers that were previously informed
- /// of this channel). This will force-close the channel in question (which will generate one
- /// final ChannelMonitorUpdate which must be delivered to at least one ChannelMonitor copy).
+ /// of this channel).
+ ///
+ /// At reception of this error, ChannelManager will force-close the channel and return at
+ /// least a final ChannelMonitorUpdate::ChannelForceClosed which must be delivered to at
+ /// least one ChannelMonitor copy. Revocation secret MUST NOT be released and offchain channel
+ /// update must be rejected.
+ ///
+	/// This failure may also signal a failure to update the local persisted copy of one of
+	/// the channel monitor instances.
///
- /// Should also be used to indicate a failure to update the local persisted copy of the channel
- /// monitor.
+	/// Note that even when you fail a holder commitment transaction update, you must store the
+	/// update to ensure you can claim from it in case a duplicate copy of this ChannelMonitor
+	/// broadcasts it (e.g. in a distributed channel-monitor deployment).
+ ///
+ /// In case of distributed watchtowers deployment, the new version must be written to disk, as
+ /// state may have been stored but rejected due to a block forcing a commitment broadcast. This
+ /// storage is used to claim outputs of rejected state confirmed onchain by another watchtower,
+ /// lagging behind on block processing.
PermanentFailure,
}
CommitmentTxBroadcasted(OutPoint),
}
-/// Simple structure send back by ManyChannelMonitor in case of HTLC detected onchain from a
-/// forward channel and from which info are needed to update HTLC in a backward channel.
+/// Simple structure sent back by `chain::Watch` when an HTLC from a forward channel is detected on
+/// chain. Used to update the corresponding HTLC in the backward channel. Failing to pass the
+/// preimage claim backward will lead to loss of funds.
+///
+/// [`chain::Watch`]: ../../chain/trait.Watch.html
#[derive(Clone, PartialEq)]
pub struct HTLCUpdate {
pub(super) payment_hash: PaymentHash,
}
impl_writeable!(HTLCUpdate, 0, { payment_hash, payment_preimage, source });
-/// A simple implementation of a ManyChannelMonitor and ChainListener. Can be used to create a
+/// A simple implementation of a [`chain::Watch`] and ChainListener. Can be used to create a
/// watchtower or watch our own channels.
///
/// Note that you must provide your own key by which to refer to channels.
/// index by a PublicKey which is required to sign any updates.
///
/// If you're using this for local monitoring of your own channels, you probably want to use
-/// `OutPoint` as the key, which will give you a ManyChannelMonitor implementation.
+/// `OutPoint` as the key, which will give you a [`chain::Watch`] implementation.
+///
+/// [`chain::Watch`]: ../../chain/trait.Watch.html
///
/// (C-not exported) due to an unconstrained generic in `Key`
-pub struct SimpleManyChannelMonitor<Key, ChanSigner: ChannelKeys, T: Deref, F: Deref, L: Deref, C: Deref>
+pub struct ChainMonitor<Key, ChanSigner: ChannelKeys, T: Deref, F: Deref, L: Deref>
where T::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
- C::Target: ChainWatchInterface,
{
/// The monitors
pub monitors: Mutex<HashMap<Key, ChannelMonitor<ChanSigner>>>,
- chain_monitor: C,
+ watch_events: Mutex<WatchEventQueue>,
broadcaster: T,
logger: L,
fee_estimator: F
}
-impl<Key : Send + cmp::Eq + hash::Hash, ChanSigner: ChannelKeys, T: Deref + Sync + Send, F: Deref + Sync + Send, L: Deref + Sync + Send, C: Deref + Sync + Send>
- ChainListener for SimpleManyChannelMonitor<Key, ChanSigner, T, F, L, C>
+/// Queue of watch events (transactions and outputs to monitor on-chain) pending
+/// delivery to the user via `chain::WatchEventProvider`, de-duplicated against
+/// everything already registered as watched.
+struct WatchEventQueue {
+	// Tracks which txids/outpoints have already been registered, so each is
+	// only surfaced to the user once.
+	watched: ChainWatchedUtil,
+	// Events queued but not yet drained via `dequeue_events`.
+	events: Vec<chain::WatchEvent>,
+}
+
+impl WatchEventQueue {
+	fn new() -> Self {
+		Self {
+			watched: ChainWatchedUtil::new(),
+			events: Vec::new(),
+		}
+	}
+
+	/// Registers interest in `txid` (paying to `script_pubkey`), queueing a
+	/// `WatchTransaction` event unless it was already registered.
+	fn watch_tx(&mut self, txid: &Txid, script_pubkey: &Script) {
+		if self.watched.register_tx(txid, script_pubkey) {
+			self.events.push(chain::WatchEvent::WatchTransaction {
+				txid: *txid,
+				script_pubkey: script_pubkey.clone()
+			});
+		}
+	}
+
+	/// Registers interest in spends of `outpoint`, queueing a `WatchOutput`
+	/// event unless it was already registered. Note the index is narrowed to
+	/// u32 for registration and to u16 for the event's `OutPoint`.
+	fn watch_output(&mut self, outpoint: (&Txid, usize), script_pubkey: &Script) {
+		let (txid, index) = outpoint;
+		if self.watched.register_outpoint((*txid, index as u32), script_pubkey) {
+			self.events.push(chain::WatchEvent::WatchOutput {
+				outpoint: OutPoint {
+					txid: *txid,
+					index: index as u16,
+				},
+				script_pubkey: script_pubkey.clone(),
+			});
+		}
+	}
+
+	/// Drains and returns all queued events, leaving the queue empty.
+	fn dequeue_events(&mut self) -> Vec<chain::WatchEvent> {
+		let mut pending_events = Vec::with_capacity(self.events.len());
+		pending_events.append(&mut self.events);
+		pending_events
+	}
+
+	/// Returns the subset of `txdata` relevant to the watched set: transactions
+	/// matching the filter directly, plus any in-block descendants of a match.
+	fn filter_block<'a>(&self, txdata: &[(usize, &'a Transaction)]) -> Vec<(usize, &'a Transaction)> {
+		let mut matched_txids = HashSet::new();
+		txdata.iter().filter(|&&(_, tx)| {
+			// A tx matches the filter if it either matches the filter directly (via does_match_tx)
+			// or if it is a descendant of another matched transaction within the same block.
+			let mut matched = self.watched.does_match_tx(tx);
+			for input in tx.input.iter() {
+				if matched || matched_txids.contains(&input.previous_output.txid) {
+					matched = true;
+					break;
+				}
+			}
+			if matched {
+				matched_txids.insert(tx.txid());
+			}
+			matched
+		}).map(|e| *e).collect()
+	}
+}
+
+impl<Key : Send + cmp::Eq + hash::Hash, ChanSigner: ChannelKeys, T: Deref + Sync + Send, F: Deref + Sync + Send, L: Deref + Sync + Send>
+ ChainListener for ChainMonitor<Key, ChanSigner, T, F, L>
where T::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
- C::Target: ChainWatchInterface,
{
- fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], _indexes_of_txn_matched: &[usize]) {
- let block_hash = header.block_hash();
+ fn block_connected(&self, header: &BlockHeader, txdata: &[(usize, &Transaction)], height: u32) {
+ let mut watch_events = self.watch_events.lock().unwrap();
+ let matched_txn = watch_events.filter_block(txdata);
{
let mut monitors = self.monitors.lock().unwrap();
for monitor in monitors.values_mut() {
- let txn_outputs = monitor.block_connected(txn_matched, height, &block_hash, &*self.broadcaster, &*self.fee_estimator, &*self.logger);
+ let txn_outputs = monitor.block_connected(header, &matched_txn, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger);
for (ref txid, ref outputs) in txn_outputs {
for (idx, output) in outputs.iter().enumerate() {
- self.chain_monitor.install_watch_outpoint((txid.clone(), idx as u32), &output.script_pubkey);
+ watch_events.watch_output((txid, idx), &output.script_pubkey);
}
}
}
}
fn block_disconnected(&self, header: &BlockHeader, disconnected_height: u32) {
- let block_hash = header.block_hash();
let mut monitors = self.monitors.lock().unwrap();
for monitor in monitors.values_mut() {
- monitor.block_disconnected(disconnected_height, &block_hash, &*self.broadcaster, &*self.fee_estimator, &*self.logger);
+ monitor.block_disconnected(header, disconnected_height, &*self.broadcaster, &*self.fee_estimator, &*self.logger);
}
}
}
-impl<Key : Send + cmp::Eq + hash::Hash + 'static, ChanSigner: ChannelKeys, T: Deref, F: Deref, L: Deref, C: Deref> SimpleManyChannelMonitor<Key, ChanSigner, T, F, L, C>
+impl<Key : Send + cmp::Eq + hash::Hash + 'static, ChanSigner: ChannelKeys, T: Deref, F: Deref, L: Deref> ChainMonitor<Key, ChanSigner, T, F, L>
where T::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
- C::Target: ChainWatchInterface,
{
/// Creates a new object which can be used to monitor several channels given the chain
/// interface with which to register to receive notifications.
- pub fn new(chain_monitor: C, broadcaster: T, logger: L, feeest: F) -> SimpleManyChannelMonitor<Key, ChanSigner, T, F, L, C> {
- let res = SimpleManyChannelMonitor {
+ pub fn new(broadcaster: T, logger: L, feeest: F) -> ChainMonitor<Key, ChanSigner, T, F, L> {
+ Self {
monitors: Mutex::new(HashMap::new()),
- chain_monitor,
+ watch_events: Mutex::new(WatchEventQueue::new()),
broadcaster,
logger,
fee_estimator: feeest,
- };
-
- res
+ }
}
/// Adds or updates the monitor which monitors the channel referred to by the given key.
- pub fn add_monitor_by_key(&self, key: Key, monitor: ChannelMonitor<ChanSigner>) -> Result<(), MonitorUpdateError> {
+ fn add_monitor_by_key(&self, key: Key, monitor: ChannelMonitor<ChanSigner>) -> Result<(), MonitorUpdateError> {
+ let mut watch_events = self.watch_events.lock().unwrap();
let mut monitors = self.monitors.lock().unwrap();
let entry = match monitors.entry(key) {
hash_map::Entry::Occupied(_) => return Err(MonitorUpdateError("Channel monitor for given key is already present")),
{
let funding_txo = monitor.get_funding_txo();
log_trace!(self.logger, "Got new Channel Monitor for channel {}", log_bytes!(funding_txo.0.to_channel_id()[..]));
- self.chain_monitor.install_watch_tx(&funding_txo.0.txid, &funding_txo.1);
- self.chain_monitor.install_watch_outpoint((funding_txo.0.txid, funding_txo.0.index as u32), &funding_txo.1);
+ watch_events.watch_tx(&funding_txo.0.txid, &funding_txo.1);
+ watch_events.watch_output((&funding_txo.0.txid, funding_txo.0.index as usize), &funding_txo.1);
for (txid, outputs) in monitor.get_outputs_to_watch().iter() {
for (idx, script) in outputs.iter().enumerate() {
- self.chain_monitor.install_watch_outpoint((*txid, idx as u32), script);
+ watch_events.watch_output((txid, idx), script);
}
}
}
}
/// Updates the monitor which monitors the channel referred to by the given key.
- pub fn update_monitor_by_key(&self, key: Key, update: ChannelMonitorUpdate) -> Result<(), MonitorUpdateError> {
+ fn update_monitor_by_key(&self, key: Key, update: ChannelMonitorUpdate) -> Result<(), MonitorUpdateError> {
let mut monitors = self.monitors.lock().unwrap();
match monitors.get_mut(&key) {
Some(orig_monitor) => {
}
}
-impl<ChanSigner: ChannelKeys, T: Deref + Sync + Send, F: Deref + Sync + Send, L: Deref + Sync + Send, C: Deref + Sync + Send> ManyChannelMonitor for SimpleManyChannelMonitor<OutPoint, ChanSigner, T, F, L, C>
+impl<ChanSigner: ChannelKeys, T: Deref + Sync + Send, F: Deref + Sync + Send, L: Deref + Sync + Send> chain::Watch for ChainMonitor<OutPoint, ChanSigner, T, F, L>
where T::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
- C::Target: ChainWatchInterface,
{
type Keys = ChanSigner;
- fn add_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor<ChanSigner>) -> Result<(), ChannelMonitorUpdateErr> {
+ fn watch_channel(&self, funding_txo: OutPoint, monitor: ChannelMonitor<ChanSigner>) -> Result<(), ChannelMonitorUpdateErr> {
match self.add_monitor_by_key(funding_txo, monitor) {
Ok(_) => Ok(()),
Err(_) => Err(ChannelMonitorUpdateErr::PermanentFailure),
}
}
- fn update_monitor(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr> {
+ fn update_channel(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr> {
match self.update_monitor_by_key(funding_txo, update) {
Ok(_) => Ok(()),
Err(_) => Err(ChannelMonitorUpdateErr::PermanentFailure),
}
}
- fn get_and_clear_pending_monitor_events(&self) -> Vec<MonitorEvent> {
+ fn release_pending_monitor_events(&self) -> Vec<MonitorEvent> {
let mut pending_monitor_events = Vec::new();
for chan in self.monitors.lock().unwrap().values_mut() {
pending_monitor_events.append(&mut chan.get_and_clear_pending_monitor_events());
}
}
-impl<Key : Send + cmp::Eq + hash::Hash, ChanSigner: ChannelKeys, T: Deref, F: Deref, L: Deref, C: Deref> events::EventsProvider for SimpleManyChannelMonitor<Key, ChanSigner, T, F, L, C>
+impl<Key : Send + cmp::Eq + hash::Hash, ChanSigner: ChannelKeys, T: Deref, F: Deref, L: Deref> events::EventsProvider for ChainMonitor<Key, ChanSigner, T, F, L>
where T::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
- C::Target: ChainWatchInterface,
{
fn get_and_clear_pending_events(&self) -> Vec<Event> {
let mut pending_events = Vec::new();
}
}
+impl<Key : Send + cmp::Eq + hash::Hash, ChanSigner: ChannelKeys, T: Deref, F: Deref, L: Deref> chain::WatchEventProvider for ChainMonitor<Key, ChanSigner, T, F, L>
+	where T::Target: BroadcasterInterface,
+	      F::Target: FeeEstimator,
+	      L::Target: Logger,
+{
+	/// Empties and returns the queue of pending watch events accumulated while
+	/// adding channel monitors and processing connected blocks.
+	fn release_pending_watch_events(&self) -> Vec<chain::WatchEvent> {
+		self.watch_events.lock().unwrap().dequeue_events()
+	}
+}
+
/// If an HTLC expires within this many blocks, don't try to claim it in a shared transaction,
/// instead claiming it in its own individual transaction.
pub(crate) const CLTV_SHARED_CLAIM_BUFFER: u32 = 12;
#[derive(Clone)]
pub(super) enum ChannelMonitorUpdateStep {
LatestHolderCommitmentTXInfo {
- commitment_tx: LocalCommitmentTransaction,
+ commitment_tx: HolderCommitmentTransaction,
htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>,
},
LatestCounterpartyCommitmentTXInfo {
// Set once we've signed a holder commitment transaction and handed it over to our
// OnchainTxHandler. After this is set, no future updates to our holder commitment transactions
// may occur, and we fail any such monitor updates.
+	//
+	// In case of an update rejected due to a locally already-signed commitment transaction, we
+	// nevertheless store the update content, to track it in case of a concurrent broadcast by
+	// another remote monitor that is out-of-order with regard to our block view.
holder_tx_signed: bool,
// We simply modify last_block_hash in Channel's block_connected so that serialization is
secp_ctx: Secp256k1<secp256k1::All>, //TODO: dedup this a bit...
}
-/// Simple trait indicating ability to track a set of ChannelMonitors and multiplex events between
-/// them. Generally should be implemented by keeping a local SimpleManyChannelMonitor and passing
-/// events to it, while also taking any add/update_monitor events and passing them to some remote
-/// server(s).
-///
-/// In general, you must always have at least one local copy in memory, which must never fail to
-/// update (as it is responsible for broadcasting the latest state in case the channel is closed),
-/// and then persist it to various on-disk locations. If, for some reason, the in-memory copy fails
-/// to update (eg out-of-memory or some other condition), you must immediately shut down without
-/// taking any further action such as writing the current state to disk. This should likely be
-/// accomplished via panic!() or abort().
-///
-/// Note that any updates to a channel's monitor *must* be applied to each instance of the
-/// channel's monitor everywhere (including remote watchtowers) *before* this function returns. If
-/// an update occurs and a remote watchtower is left with old state, it may broadcast transactions
-/// which we have revoked, allowing our counterparty to claim all funds in the channel!
-///
-/// User needs to notify implementors of ManyChannelMonitor when a new block is connected or
-/// disconnected using their `block_connected` and `block_disconnected` methods. However, rather
-/// than calling these methods directly, the user should register implementors as listeners to the
-/// BlockNotifier and call the BlockNotifier's `block_(dis)connected` methods, which will notify
-/// all registered listeners in one go.
-pub trait ManyChannelMonitor: Send + Sync {
- /// The concrete type which signs for transactions and provides access to our channel public
- /// keys.
- type Keys: ChannelKeys;
-
- /// Adds a monitor for the given `funding_txo`.
- ///
- /// Implementer must also ensure that the funding_txo txid *and* outpoint are registered with
- /// any relevant ChainWatchInterfaces such that the provided monitor receives block_connected
- /// callbacks with the funding transaction, or any spends of it.
- ///
- /// Further, the implementer must also ensure that each output returned in
- /// monitor.get_outputs_to_watch() is registered to ensure that the provided monitor learns about
- /// any spends of any of the outputs.
- ///
- /// Any spends of outputs which should have been registered which aren't passed to
- /// ChannelMonitors via block_connected may result in FUNDS LOSS.
- fn add_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor<Self::Keys>) -> Result<(), ChannelMonitorUpdateErr>;
-
- /// Updates a monitor for the given `funding_txo`.
- ///
- /// Implementer must also ensure that the funding_txo txid *and* outpoint are registered with
- /// any relevant ChainWatchInterfaces such that the provided monitor receives block_connected
- /// callbacks with the funding transaction, or any spends of it.
- ///
- /// Further, the implementer must also ensure that each output returned in
- /// monitor.get_watch_outputs() is registered to ensure that the provided monitor learns about
- /// any spends of any of the outputs.
- ///
- /// Any spends of outputs which should have been registered which aren't passed to
- /// ChannelMonitors via block_connected may result in FUNDS LOSS.
- fn update_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr>;
-
- /// Used by ChannelManager to get list of HTLC resolved onchain and which needed to be updated
- /// with success or failure.
- ///
- /// You should probably just call through to
- /// ChannelMonitor::get_and_clear_pending_monitor_events() for each ChannelMonitor and return
- /// the full list.
- fn get_and_clear_pending_monitor_events(&self) -> Vec<MonitorEvent>;
-}
-
#[cfg(any(test, feature = "fuzztarget"))]
/// Used only in testing and fuzztarget to check serialization roundtrips don't change the
/// underlying object
counterparty_htlc_base_key: &PublicKey, counterparty_delayed_payment_base_key: &PublicKey,
on_holder_tx_csv: u16, funding_redeemscript: Script, channel_value_satoshis: u64,
commitment_transaction_number_obscure_factor: u64,
- initial_holder_commitment_tx: LocalCommitmentTransaction) -> ChannelMonitor<ChanSigner> {
+ initial_holder_commitment_tx: HolderCommitmentTransaction) -> ChannelMonitor<ChanSigner> {
assert!(commitment_transaction_number_obscure_factor <= (1 << 48));
let our_channel_close_key_hash = WPubkeyHash::hash(&shutdown_pubkey.serialize());
let holder_tx_locktime = initial_holder_commitment_tx.unsigned_tx.lock_time as u64;
let holder_commitment_tx = HolderSignedTx {
txid: initial_holder_commitment_tx.txid(),
- revocation_key: initial_holder_commitment_tx.local_keys.revocation_key,
- a_htlc_key: initial_holder_commitment_tx.local_keys.broadcaster_htlc_key,
- b_htlc_key: initial_holder_commitment_tx.local_keys.countersignatory_htlc_key,
- delayed_payment_key: initial_holder_commitment_tx.local_keys.broadcaster_delayed_payment_key,
- per_commitment_point: initial_holder_commitment_tx.local_keys.per_commitment_point,
+ revocation_key: initial_holder_commitment_tx.keys.revocation_key,
+ a_htlc_key: initial_holder_commitment_tx.keys.broadcaster_htlc_key,
+ b_htlc_key: initial_holder_commitment_tx.keys.countersignatory_htlc_key,
+ delayed_payment_key: initial_holder_commitment_tx.keys.broadcaster_delayed_payment_key,
+ per_commitment_point: initial_holder_commitment_tx.keys.per_commitment_point,
feerate_per_kw: initial_holder_commitment_tx.feerate_per_kw,
htlc_outputs: Vec::new(), // There are never any HTLCs in the initial commitment transactions
};
- // Returning a monitor error before updating tracking points means in case of using
- // a concurrent watchtower implementation for same channel, if this one doesn't
- // reject update as we do, you MAY have the latest holder valid commitment tx onchain
- // for which you want to spend outputs. We're NOT robust again this scenario right
- // now but we should consider it later.
- onchain_tx_handler.provide_latest_holder_tx(initial_holder_commitment_tx).unwrap();
+ onchain_tx_handler.provide_latest_holder_tx(initial_holder_commitment_tx);
ChannelMonitor {
latest_update_id: 0,
/// is important that any clones of this channel monitor (including remote clones) by kept
/// up-to-date as our holder commitment transaction is updated.
/// Panics if set_on_holder_tx_csv has never been called.
- pub(super) fn provide_latest_holder_commitment_tx_info(&mut self, commitment_tx: LocalCommitmentTransaction, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>) -> Result<(), MonitorUpdateError> {
- if self.holder_tx_signed {
- return Err(MonitorUpdateError("A holder commitment tx has already been signed, no new holder commitment txn can be sent to our counterparty"));
- }
+ pub(super) fn provide_latest_holder_commitment_tx_info(&mut self, commitment_tx: HolderCommitmentTransaction, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>) -> Result<(), MonitorUpdateError> {
let txid = commitment_tx.txid();
let sequence = commitment_tx.unsigned_tx.input[0].sequence as u64;
let locktime = commitment_tx.unsigned_tx.lock_time as u64;
let mut new_holder_commitment_tx = HolderSignedTx {
txid,
- revocation_key: commitment_tx.local_keys.revocation_key,
- a_htlc_key: commitment_tx.local_keys.broadcaster_htlc_key,
- b_htlc_key: commitment_tx.local_keys.countersignatory_htlc_key,
- delayed_payment_key: commitment_tx.local_keys.broadcaster_delayed_payment_key,
- per_commitment_point: commitment_tx.local_keys.per_commitment_point,
+ revocation_key: commitment_tx.keys.revocation_key,
+ a_htlc_key: commitment_tx.keys.broadcaster_htlc_key,
+ b_htlc_key: commitment_tx.keys.countersignatory_htlc_key,
+ delayed_payment_key: commitment_tx.keys.broadcaster_delayed_payment_key,
+ per_commitment_point: commitment_tx.keys.per_commitment_point,
feerate_per_kw: commitment_tx.feerate_per_kw,
htlc_outputs: htlc_outputs,
};
- // Returning a monitor error before updating tracking points means in case of using
- // a concurrent watchtower implementation for same channel, if this one doesn't
- // reject update as we do, you MAY have the latest holder valid commitment tx onchain
- // for which you want to spend outputs. We're NOT robust again this scenario right
- // now but we should consider it later.
- if let Err(_) = self.onchain_tx_handler.provide_latest_holder_tx(commitment_tx) {
- return Err(MonitorUpdateError("Holder commitment signed has already been signed, no further update of LOCAL commitment transaction is allowed"));
- }
+ self.onchain_tx_handler.provide_latest_holder_tx(commitment_tx);
self.current_holder_commitment_number = 0xffff_ffff_ffff - ((((sequence & 0xffffff) << 3*8) | (locktime as u64 & 0xffffff)) ^ self.commitment_transaction_number_obscure_factor);
mem::swap(&mut new_holder_commitment_tx, &mut self.current_holder_commitment_tx);
self.prev_holder_signed_commitment_tx = Some(new_holder_commitment_tx);
+ if self.holder_tx_signed {
+ return Err(MonitorUpdateError("Latest holder commitment signed has already been signed, update is rejected"));
+ }
Ok(())
}
}
/// Get the list of HTLCs who's status has been updated on chain. This should be called by
- /// ChannelManager via ManyChannelMonitor::get_and_clear_pending_monitor_events().
+ /// ChannelManager via [`chain::Watch::release_pending_monitor_events`].
+ ///
+ /// [`chain::Watch::release_pending_monitor_events`]: ../../chain/trait.Watch.html#tymethod.release_pending_monitor_events
pub fn get_and_clear_pending_monitor_events(&mut self) -> Vec<MonitorEvent> {
let mut ret = Vec::new();
mem::swap(&mut ret, &mut self.pending_monitor_events);
/// Gets the list of pending events which were generated by previous actions, clearing the list
/// in the process.
///
- /// This is called by ManyChannelMonitor::get_and_clear_pending_events() and is equivalent to
+ /// This is called by ChainMonitor::get_and_clear_pending_events() and is equivalent to
/// EventsProvider::get_and_clear_pending_events() except that it requires &mut self as we do
/// no internal locking in ChannelMonitors.
pub fn get_and_clear_pending_events(&mut self) -> Vec<Event> {
}
/// Unsafe test-only version of get_latest_holder_commitment_txn used by our test framework
- /// to bypass LocalCommitmentTransaction state update lockdown after signature and generate
+ /// to bypass HolderCommitmentTransaction state update lockdown after signature and generate
/// revoked commitment transaction.
#[cfg(any(test,feature = "unsafe_revoked_tx_signing"))]
pub fn unsafe_get_latest_holder_commitment_txn<L: Deref>(&mut self, logger: &L) -> Vec<Transaction> where L::Target: Logger {
Vec::new()
}
- /// Called by SimpleManyChannelMonitor::block_connected, which implements
- /// ChainListener::block_connected.
- /// Eventually this should be pub and, roughly, implement ChainListener, however this requires
- /// &mut self, as well as returns new spendable outputs and outpoints to watch for spending of
- /// on-chain.
- fn block_connected<B: Deref, F: Deref, L: Deref>(&mut self, txn_matched: &[&Transaction], height: u32, block_hash: &BlockHash, broadcaster: B, fee_estimator: F, logger: L)-> Vec<(Txid, Vec<TxOut>)>
+ /// Processes transactions in a newly connected block, which may result in any of the following:
+ /// - update the monitor's state against resolved HTLCs
+ /// - punish the counterparty in the case of seeing a revoked commitment transaction
+ /// - force close the channel and claim/timeout incoming/outgoing HTLCs if near expiration
+ /// - detect settled outputs for later spending
+ /// - schedule and bump any in-flight claims
+ ///
+	/// Returns any transaction outputs from `txn_matched` whose spends should be watched for.
+	/// After calling this, these are also available via [`get_outputs_to_watch`].
+ ///
+ /// [`get_outputs_to_watch`]: #method.get_outputs_to_watch
+ pub fn block_connected<B: Deref, F: Deref, L: Deref>(&mut self, header: &BlockHeader, txn_matched: &[(usize, &Transaction)], height: u32, broadcaster: B, fee_estimator: F, logger: L)-> Vec<(Txid, Vec<TxOut>)>
where B::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
- for tx in txn_matched {
+ for &(_, tx) in txn_matched {
let mut output_val = 0;
for out in tx.output.iter() {
if out.value > 21_000_000_0000_0000 { panic!("Value-overflowing transaction provided to block connected"); }
}
}
+ let block_hash = header.block_hash();
log_trace!(logger, "Block {} at height {} connected with {} txn matched", block_hash, height, txn_matched.len());
+
let mut watch_outputs = Vec::new();
let mut claimable_outpoints = Vec::new();
- for tx in txn_matched {
+ for &(_, tx) in txn_matched {
if tx.input.len() == 1 {
// Assuming our keys were not leaked (in which case we're screwed no matter what),
// commitment transactions and HTLC transactions will all only ever have one input,
self.onchain_tx_handler.block_connected(txn_matched, claimable_outpoints, height, &*broadcaster, &*fee_estimator, &*logger);
- self.last_block_hash = block_hash.clone();
+ self.last_block_hash = block_hash;
for &(ref txid, ref output_scripts) in watch_outputs.iter() {
self.outputs_to_watch.insert(txid.clone(), output_scripts.iter().map(|o| o.script_pubkey.clone()).collect());
}
watch_outputs
}
- fn block_disconnected<B: Deref, F: Deref, L: Deref>(&mut self, height: u32, block_hash: &BlockHash, broadcaster: B, fee_estimator: F, logger: L)
+ /// Determines if the disconnected block contained any transactions of interest and updates
+ /// appropriately.
+ pub fn block_disconnected<B: Deref, F: Deref, L: Deref>(&mut self, header: &BlockHeader, height: u32, broadcaster: B, fee_estimator: F, logger: L)
where B::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
+ let block_hash = header.block_hash();
log_trace!(logger, "Block {} at height {} disconnected", block_hash, height);
+
if let Some(_) = self.onchain_events_waiting_threshold_conf.remove(&(height + ANTI_REORG_DELAY - 1)) {
//We may discard:
//- htlc update there as failure-trigger tx (revoked commitment tx, non-revoked commitment tx, HTLC-timeout tx) has been disconnected
self.onchain_tx_handler.block_disconnected(height, broadcaster, fee_estimator, logger);
- self.last_block_hash = block_hash.clone();
+ self.last_block_hash = block_hash;
}
fn would_broadcast_at_height<L: Deref>(&self, height: u32, logger: &L) -> bool where L::Target: Logger {
to_self_delay: self.on_holder_tx_csv,
output: outp.clone(),
key_derivation_params: self.keys.key_derivation_params(),
- counterparty_revocation_pubkey: broadcasted_holder_revokable_script.2.clone(),
+ revocation_pubkey: broadcasted_holder_revokable_script.2.clone(),
});
break;
}
use ln::channelmonitor::ChannelMonitor;
use ln::onchaintx::{OnchainTxHandler, InputDescriptors};
use ln::chan_utils;
- use ln::chan_utils::{HTLCOutputInCommitment, LocalCommitmentTransaction};
+ use ln::chan_utils::{HTLCOutputInCommitment, HolderCommitmentTransaction};
use util::test_utils::TestLogger;
use bitcoin::secp256k1::key::{SecretKey,PublicKey};
use bitcoin::secp256k1::Secp256k1;
(OutPoint { txid: Txid::from_slice(&[43; 32]).unwrap(), index: 0 }, Script::new()),
&PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[44; 32]).unwrap()),
&PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()),
- 10, Script::new(), 46, 0, LocalCommitmentTransaction::dummy());
+ 10, Script::new(), 46, 0, HolderCommitmentTransaction::dummy());
- monitor.provide_latest_holder_commitment_tx_info(LocalCommitmentTransaction::dummy(), preimages_to_holder_htlcs!(preimages[0..10])).unwrap();
+ monitor.provide_latest_holder_commitment_tx_info(HolderCommitmentTransaction::dummy(), preimages_to_holder_htlcs!(preimages[0..10])).unwrap();
monitor.provide_latest_counterparty_commitment_tx_info(&dummy_tx, preimages_slice_to_htlc_outputs!(preimages[5..15]), 281474976710655, dummy_key, &logger);
monitor.provide_latest_counterparty_commitment_tx_info(&dummy_tx, preimages_slice_to_htlc_outputs!(preimages[15..20]), 281474976710654, dummy_key, &logger);
monitor.provide_latest_counterparty_commitment_tx_info(&dummy_tx, preimages_slice_to_htlc_outputs!(preimages[17..20]), 281474976710653, dummy_key, &logger);
// Now update holder commitment tx info, pruning only element 18 as we still care about the
// previous commitment tx's preimages too
- monitor.provide_latest_holder_commitment_tx_info(LocalCommitmentTransaction::dummy(), preimages_to_holder_htlcs!(preimages[0..5])).unwrap();
+ monitor.provide_latest_holder_commitment_tx_info(HolderCommitmentTransaction::dummy(), preimages_to_holder_htlcs!(preimages[0..5])).unwrap();
secret[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
monitor.provide_secret(281474976710653, secret.clone()).unwrap();
assert_eq!(monitor.payment_preimages.len(), 12);
test_preimages_exist!(&preimages[18..20], monitor);
// But if we do it again, we'll prune 5-10
- monitor.provide_latest_holder_commitment_tx_info(LocalCommitmentTransaction::dummy(), preimages_to_holder_htlcs!(preimages[0..3])).unwrap();
+ monitor.provide_latest_holder_commitment_tx_info(HolderCommitmentTransaction::dummy(), preimages_to_holder_htlcs!(preimages[0..3])).unwrap();
secret[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
monitor.provide_secret(281474976710652, secret.clone()).unwrap();
assert_eq!(monitor.payment_preimages.len(), 5);