TemporaryFailure,
/// Used to indicate no further channel monitor updates will be allowed (eg we've moved on to a
/// different watchtower and cannot update with all watchtowers that were previously informed
- /// of this channel). This will force-close the channel in question.
+ /// of this channel). This will force-close the channel in question (which will generate one
+ /// final ChannelMonitorUpdate which must be delivered to at least one ChannelMonitor copy).
///
- /// Should also be used to indicate a failure to update the local copy of the channel monitor.
+ /// Should also be used to indicate a failure to update the local persisted copy of the channel
+ /// monitor.
PermanentFailure,
}
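
// A hedged, self-contained sketch of how a ManyChannelMonitor implementation
// might map watchtower failures onto these variants: TemporaryFailure for a
// recoverable hiccup, PermanentFailure once a watchtower that was informed of
// the channel is gone for good. `WatchtowerClient` and its fields are
// illustrative assumptions, not rust-lightning types.
struct WatchtowerClient { permanently_lost: bool }
impl WatchtowerClient {
	fn push_update(&self, _update: &[u8]) -> Result<(), ()> {
		if self.permanently_lost { Err(()) } else { Ok(()) }
	}
}

fn fan_out_update(towers: &[WatchtowerClient], update: &[u8]) -> Result<(), ChannelMonitorUpdateErr> {
	for tower in towers {
		if tower.push_update(update).is_err() {
			if tower.permanently_lost {
				// Forces the channel closed; the resulting final
				// ChannelMonitorUpdate must reach a surviving monitor copy.
				return Err(ChannelMonitorUpdateErr::PermanentFailure);
			}
			// Recoverable: freeze the channel and retry the update later.
			return Err(ChannelMonitorUpdateErr::TemporaryFailure);
		}
	}
	Ok(())
}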
/// events to it, while also taking any add/update_monitor events and passing them to some remote
/// server(s).
///
+/// In general, you must always have at least one local copy in memory, which must never fail to
+/// update (as it is responsible for broadcasting the latest state in case the channel is closed),
+/// and then persist it to various on-disk locations. If, for some reason, the in-memory copy fails
+/// to update (eg out-of-memory or some other condition), you must immediately shut down without
+/// taking any further action such as writing the current state to disk. This should likely be
+/// accomplished via panic!() or abort().
+///
/// Note that any updates to a channel's monitor *must* be applied to each instance of the
/// channel's monitor everywhere (including remote watchtowers) *before* this function returns. If
/// an update occurs and a remote watchtower is left with old state, it may broadcast transactions
match monitors.get_mut(&key) {
Some(orig_monitor) => {
log_trace!(self, "Updating Channel Monitor for channel {}", log_funding_info!(orig_monitor.key_storage));
- orig_monitor.update_monitor(update)
+ orig_monitor.update_monitor(update, &self.broadcaster)
},
None => Err(MonitorUpdateError("No such monitor registered"))
}
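
// A minimal sketch of the update discipline described in the docs above:
// apply the update to the in-memory copy first, abort immediately if that
// somehow fails, and only then persist. `InMemoryMonitor` is a hypothetical
// stand-in for a local ChannelMonitor copy, not the real type.
struct InMemoryMonitor { state: Vec<u8> }
impl InMemoryMonitor {
	fn apply(&mut self, update: &[u8]) -> Result<(), ()> {
		self.state.extend_from_slice(update);
		Ok(())
	}
}

fn apply_then_persist(monitor: &mut InMemoryMonitor, update: &[u8], path: &std::path::Path) -> std::io::Result<()> {
	if monitor.apply(update).is_err() {
		// Never write state to disk after an in-memory failure: the in-memory
		// copy is responsible for broadcasting the latest state on closure and
		// must never fall behind the persisted copies.
		std::process::abort();
	}
	// An I/O failure here maps to TemporaryFailure upstream (or
	// PermanentFailure if the local copy can never be updated again).
	std::fs::write(path, &monitor.state)
}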
RescueRemoteCommitmentTXInfo {
their_current_per_commitment_point: PublicKey,
},
+ /// Used to indicate that no future updates will occur, and likely that the latest local
+ /// commitment transaction(s) should be broadcast, as the channel has been force-closed.
+ ChannelForceClosed {
+ /// If set to false, we shouldn't broadcast the latest local commitment transaction as we
+ /// think we've fallen behind!
+ should_broadcast: bool,
+ },
}
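
// A hypothetical (crate-internal) sketch of building the final update for a
// force-closed channel; `we_are_behind` would be true if, e.g., a peer's
// channel_reestablish proved our commitment state is stale, making our latest
// local commitment transaction toxic to broadcast.
fn force_close_update(latest_update_id: u64, we_are_behind: bool) -> ChannelMonitorUpdate {
	ChannelMonitorUpdate {
		update_id: latest_update_id + 1,
		updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed {
			should_broadcast: !we_are_behind,
		}],
	}
}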
impl Writeable for ChannelMonitorUpdateStep {
4u8.write(w)?;
their_current_per_commitment_point.write(w)?;
},
+ &ChannelMonitorUpdateStep::ChannelForceClosed { ref should_broadcast } => {
+ 5u8.write(w)?;
+ should_broadcast.write(w)?;
+ },
}
Ok(())
}
their_current_per_commitment_point: Readable::read(r)?,
})
},
+ 5u8 => {
+ Ok(ChannelMonitorUpdateStep::ChannelForceClosed {
+ should_broadcast: Readable::read(r)?
+ })
+ },
_ => Err(DecodeError::InvalidValue),
}
}
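
// A self-contained illustration of the tag-byte scheme above, written against
// plain std::io rather than the Writeable/Readable traits: the writer emits
// the variant's discriminant (5u8 for ChannelForceClosed) followed by its
// fields, and the reader dispatches on that first byte.
use std::io::{self, Read, Write};

fn write_force_closed<W: Write>(w: &mut W, should_broadcast: bool) -> io::Result<()> {
	w.write_all(&[5u8])?; // variant tag
	w.write_all(&[should_broadcast as u8]) // bool encoded as a single byte
}

fn read_should_broadcast<R: Read>(r: &mut R) -> io::Result<Option<bool>> {
	let mut tag = [0u8; 1];
	r.read_exact(&mut tag)?;
	if tag[0] != 5 {
		return Ok(None); // unknown tag; corresponds to DecodeError::InvalidValue above
	}
	let mut b = [0u8; 1];
	r.read_exact(&mut b)?;
	Ok(Some(b[0] != 0))
}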
latest_update_id: u64,
commitment_transaction_number_obscure_factor: u64,
+ destination_script: Script,
key_storage: Storage<ChanSigner>,
their_htlc_base_key: Option<PublicKey>,
their_delayed_payment_base_key: Option<PublicKey>,
fn eq(&self, other: &Self) -> bool {
if self.latest_update_id != other.latest_update_id ||
self.commitment_transaction_number_obscure_factor != other.commitment_transaction_number_obscure_factor ||
+ self.destination_script != other.destination_script ||
self.key_storage != other.key_storage ||
self.their_htlc_base_key != other.their_htlc_base_key ||
self.their_delayed_payment_base_key != other.their_delayed_payment_base_key ||
// Set in initial Channel-object creation, so should always be set by now:
U48(self.commitment_transaction_number_obscure_factor).write(writer)?;
+ self.destination_script.write(writer)?;
match self.key_storage {
Storage::Local { ref keys, ref funding_key, ref revocation_base_key, ref htlc_base_key, ref delayed_payment_base_key, ref payment_base_key, ref shutdown_pubkey, ref funding_info, ref current_remote_commitment_txid, ref prev_remote_commitment_txid } => {
writer.write_all(&[0; 1])?;
for ev in events.iter() {
match *ev {
OnchainEvent::HTLCUpdate { ref htlc_update } => {
- writer.write_all(&[1; 1])?;
+ 0u8.write(writer)?;
htlc_update.0.write(writer)?;
htlc_update.1.write(writer)?;
},
latest_update_id: 0,
commitment_transaction_number_obscure_factor,
+ destination_script: destination_script.clone(),
key_storage: Storage::Local {
keys,
funding_key,
self.payment_preimages.insert(payment_hash.clone(), payment_preimage.clone());
}
+ pub(super) fn broadcast_latest_local_commitment_txn<B: Deref>(&mut self, broadcaster: &B)
+ where B::Target: BroadcasterInterface,
+ {
+ for tx in self.get_latest_local_commitment_txn().iter() {
+ broadcaster.broadcast_transaction(tx);
+ }
+ }
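
// A hedged sketch of a trivial BroadcasterInterface implementation (useful in
// tests) that records transactions instead of relaying them; an Arc of this
// (or anything else Deref-ing to it) can be passed as `broadcaster` above.
// The import paths assume the crate layout of this era.
use bitcoin::blockdata::transaction::Transaction;
use chain::chaininterface::BroadcasterInterface;
use std::sync::Mutex;

struct VecBroadcaster(Mutex<Vec<Transaction>>);
impl BroadcasterInterface for VecBroadcaster {
	fn broadcast_transaction(&self, tx: &Transaction) {
		// A real implementation would hand the transaction to the Bitcoin
		// network; here we just record it for inspection.
		self.0.lock().unwrap().push(tx.clone());
	}
}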
+
/// Used in Channel to cheat wrt the update_ids since it plays games, will be removed soon!
pub(super) fn update_monitor_ooo(&mut self, mut updates: ChannelMonitorUpdate) -> Result<(), MonitorUpdateError> {
for update in updates.updates.drain(..) {
self.provide_secret(idx, secret)?,
ChannelMonitorUpdateStep::RescueRemoteCommitmentTXInfo { their_current_per_commitment_point } =>
self.provide_rescue_remote_commitment_tx_info(their_current_per_commitment_point),
+ ChannelMonitorUpdateStep::ChannelForceClosed { .. } => {},
}
}
self.latest_update_id = updates.update_id;
/// itself.
///
/// panics if the given update is not the next update by update_id.
- pub fn update_monitor(&mut self, mut updates: ChannelMonitorUpdate) -> Result<(), MonitorUpdateError> {
+ pub fn update_monitor<B: Deref>(&mut self, mut updates: ChannelMonitorUpdate, broadcaster: &B) -> Result<(), MonitorUpdateError>
+ where B::Target: BroadcasterInterface,
+ {
if self.latest_update_id + 1 != updates.update_id {
panic!("Attempted to apply ChannelMonitorUpdates out of order, check the update_id before passing an update to update_monitor!");
}
self.provide_secret(idx, secret)?,
ChannelMonitorUpdateStep::RescueRemoteCommitmentTXInfo { their_current_per_commitment_point } =>
self.provide_rescue_remote_commitment_tx_info(their_current_per_commitment_point),
+ ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } => {
+ if should_broadcast {
+ self.broadcast_latest_local_commitment_txn(broadcaster);
+ } else {
+ log_error!(self, "You have a toxic local commitment transaction available in your channel monitor; see the comment on ChannelMonitor::get_latest_local_commitment_txn for the manual action required");
+ }
+ }
}
}
self.latest_update_id = updates.update_id;
}
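
// A hedged caller-side sketch of the ordering contract enforced by the panic
// above: updates must be handed to update_monitor strictly in update_id
// order. `pending` is a hypothetical queue already sorted by update_id.
fn drain_updates<ChanSigner: ChannelKeys, B: Deref>(monitor: &mut ChannelMonitor<ChanSigner>, pending: Vec<ChannelMonitorUpdate>, broadcaster: &B) -> Result<(), MonitorUpdateError>
	where B::Target: BroadcasterInterface,
{
	for update in pending {
		// Each update's update_id must be exactly latest_update_id + 1.
		monitor.update_monitor(update, broadcaster)?;
	}
	Ok(())
}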
/// Attempts to claim a remote HTLC-Success/HTLC-Timeout's outputs using the revocation key
- fn check_spend_remote_htlc(&mut self, tx: &Transaction, commitment_number: u64, height: u32) -> Vec<ClaimRequest> {
- //TODO: send back new outputs to guarantee pending_claim_request consistency
+ fn check_spend_remote_htlc(&mut self, tx: &Transaction, commitment_number: u64, height: u32) -> (Vec<ClaimRequest>, Option<(Sha256dHash, Vec<TxOut>)>) {
+ let htlc_txid = tx.txid();
if tx.input.len() != 1 || tx.output.len() != 1 || tx.input[0].witness.len() != 5 {
- return Vec::new()
+ return (Vec::new(), None)
}
macro_rules! ignore_error {
( $thing : expr ) => {
match $thing {
Ok(a) => a,
- Err(_) => return Vec::new()
+ Err(_) => return (Vec::new(), None)
}
};
}
- let secret = if let Some(secret) = self.get_secret(commitment_number) { secret } else { return Vec::new(); };
+ let secret = if let Some(secret) = self.get_secret(commitment_number) { secret } else { return (Vec::new(), None); };
let per_commitment_key = ignore_error!(SecretKey::from_slice(&secret));
let per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &per_commitment_key);
let (revocation_pubkey, revocation_key) = match self.key_storage {
Storage::Watchtower { .. } => { unimplemented!() }
};
let delayed_key = match self.their_delayed_payment_base_key {
- None => return Vec::new(),
+ None => return (Vec::new(), None),
Some(their_delayed_payment_base_key) => ignore_error!(chan_utils::derive_public_key(&self.secp_ctx, &per_commitment_point, &their_delayed_payment_base_key)),
};
let redeemscript = chan_utils::get_revokeable_redeemscript(&revocation_pubkey, self.our_to_self_delay, &delayed_key);
- let htlc_txid = tx.txid(); //TODO: This is gonna be a performance bottleneck for watchtowers!
log_trace!(self, "Remote HTLC broadcast {}:{}", htlc_txid, 0);
let witness_data = InputMaterial::Revoked { witness_script: redeemscript, pubkey: Some(revocation_pubkey), key: revocation_key, is_htlc: false, amount: tx.output[0].value };
let claimable_outpoints = vec!(ClaimRequest { absolute_timelock: height + self.our_to_self_delay as u32, aggregable: true, outpoint: BitcoinOutPoint { txid: htlc_txid, vout: 0}, witness_data });
- claimable_outpoints
+ (claimable_outpoints, Some((htlc_txid, tx.output.clone())))
}
fn broadcast_by_local_state(&self, local_tx: &LocalSignedTx, delayed_payment_base_key: &SecretKey) -> (Vec<Transaction>, Vec<SpendableOutputDescriptor>, Vec<TxOut>) {
/// out-of-band the other node's operator, to coordinate with them if that option is available to
/// you. In any case, the choice is up to the user.
pub fn get_latest_local_commitment_txn(&mut self) -> Vec<Transaction> {
+ // TODO: We should likely move all of the logic in here into OnChainTxHandler and unify it
+ // to ensure add_local_sig is only ever called once no matter what. This likely includes
+ // tracking state and panic!()ing if we get an update after force-closure/local-tx signing.
log_trace!(self, "Getting signed latest local commitment transaction!");
if let &mut Some(ref mut local_tx) = &mut self.current_local_signed_commitment_tx {
match self.key_storage {
}
} else {
if let Some(&(commitment_number, _)) = self.remote_commitment_txn_on_chain.get(&prevout.txid) {
- let mut new_outpoints = self.check_spend_remote_htlc(&tx, commitment_number, height);
+ let (mut new_outpoints, new_outputs_option) = self.check_spend_remote_htlc(&tx, commitment_number, height);
claimable_outpoints.append(&mut new_outpoints);
+ if let Some(new_outputs) = new_outputs_option {
+ watch_outputs.push(new_outputs);
+ }
}
}
}
// can also be resolved in a few other ways which can have more than one output. Thus,
// we call is_resolving_htlc_output here outside of the tx.input.len() == 1 check.
self.is_resolving_htlc_output(&tx, height);
+
+ if let Some(spendable_output) = self.is_paying_spendable_output(&tx) {
+ spendable_outputs.push(spendable_output);
+ }
}
let should_broadcast = if let Some(_) = self.current_local_signed_commitment_tx {
self.would_broadcast_at_height(height)
}
}
}
- let mut spendable_output = self.onchain_tx_handler.block_connected(txn_matched, claimable_outpoints, height, &*broadcaster, &*fee_estimator);
- spendable_outputs.append(&mut spendable_output);
+ self.onchain_tx_handler.block_connected(txn_matched, claimable_outpoints, height, &*broadcaster, &*fee_estimator);
self.last_block_hash = block_hash.clone();
for &(ref txid, ref output_scripts) in watch_outputs.iter() {
self.outputs_to_watch.insert(txid.clone(), output_scripts.iter().map(|o| o.script_pubkey.clone()).collect());
}
+ for spend in spendable_outputs.iter() {
+ log_trace!(self, "Announcing spendable output to user: {}", log_spendable!(spend));
+ }
+
if spendable_outputs.len() > 0 {
self.pending_events.push(events::Event::SpendableOutputs {
outputs: spendable_outputs,
if let Some((source, payment_hash)) = payment_data {
let mut payment_preimage = PaymentPreimage([0; 32]);
if accepted_preimage_claim {
- payment_preimage.0.copy_from_slice(&input.witness[3]);
- self.pending_htlcs_updated.push(HTLCUpdate {
- source,
- payment_preimage: Some(payment_preimage),
- payment_hash
- });
+ if !self.pending_htlcs_updated.iter().any(|update| update.source == source) {
+ payment_preimage.0.copy_from_slice(&input.witness[3]);
+ self.pending_htlcs_updated.push(HTLCUpdate {
+ source,
+ payment_preimage: Some(payment_preimage),
+ payment_hash
+ });
+ }
} else if offered_preimage_claim {
- payment_preimage.0.copy_from_slice(&input.witness[1]);
- self.pending_htlcs_updated.push(HTLCUpdate {
- source,
- payment_preimage: Some(payment_preimage),
- payment_hash
- });
+ if !self.pending_htlcs_updated.iter().any(|update| update.source == source) {
+ payment_preimage.0.copy_from_slice(&input.witness[1]);
+ self.pending_htlcs_updated.push(HTLCUpdate {
+ source,
+ payment_preimage: Some(payment_preimage),
+ payment_hash
+ });
+ }
} else {
log_info!(self, "Failing HTLC with payment_hash {} timeout by a spend tx, waiting for confirmation (at height {})", log_bytes!(payment_hash.0), height + ANTI_REORG_DELAY - 1);
match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
}
}
}
+
+ /// Checks whether a broadcast transaction pays funds back to an address we can assume we own.
+ fn is_paying_spendable_output(&self, tx: &Transaction) -> Option<SpendableOutputDescriptor> {
+ for (i, outp) in tx.output.iter().enumerate() { // There is at most one spendable output per channel tx, including ones generated by us
+ if outp.script_pubkey == self.destination_script {
+ return Some(SpendableOutputDescriptor::StaticOutput {
+ outpoint: BitcoinOutPoint { txid: tx.txid(), vout: i as u32 },
+ output: outp.clone(),
+ });
+ }
+ }
+ None
+ }
}
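
// The matching logic of is_paying_spendable_output in isolation, as a hedged
// standalone sketch: scan a transaction's outputs for the (at most one)
// output paying to our destination_script.
use bitcoin::blockdata::script::Script;
use bitcoin::blockdata::transaction::{Transaction, TxOut};

fn find_our_output(tx: &Transaction, destination_script: &Script) -> Option<(u32, TxOut)> {
	tx.output.iter().enumerate().find_map(|(i, outp)| {
		if outp.script_pubkey == *destination_script {
			Some((i as u32, outp.clone()))
		} else {
			None
		}
	})
}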
const MAX_ALLOC_SIZE: usize = 64*1024;
let latest_update_id: u64 = Readable::read(reader)?;
let commitment_transaction_number_obscure_factor = <U48 as Readable>::read(reader)?.0;
+ let destination_script = Readable::read(reader)?;
+
let key_storage = match <u8 as Readable>::read(reader)? {
0 => {
let keys = Readable::read(reader)?;
latest_update_id,
commitment_transaction_number_obscure_factor,
+ destination_script,
+
key_storage,
their_htlc_base_key,
their_delayed_payment_base_key,