/// Indicates that the latest ChannelMonitor update has been committed by the client
/// successfully and we should restore normal operation. Returns messages which should be sent
/// to the remote side.
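+ /// If freeing the holding cell generates a new commitment update, the corresponding
+ /// `ChannelMonitorUpdate` is also returned and must be applied via `chain::Watch`'s
+ /// `update_channel` before the commitment update is sent; any holding-cell HTLCs which
+ /// could not be freed are returned so they can be failed backwards.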
- pub fn monitor_updating_restored<L: Deref>(&mut self, logger: &L) -> (Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, RAACommitmentOrder, Vec<(PendingHTLCInfo, u64)>, Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, bool, Option<msgs::FundingLocked>) where L::Target: Logger {
+ pub fn monitor_updating_restored<L: Deref>(&mut self, logger: &L) -> (
+ Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, RAACommitmentOrder, Option<ChannelMonitorUpdate>,
+ Vec<(PendingHTLCInfo, u64)>, Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, Vec<(HTLCSource, PaymentHash)>,
+ bool, Option<msgs::FundingLocked>) where L::Target: Logger {
assert_eq!(self.channel_state & ChannelState::MonitorUpdateFailed as u32, ChannelState::MonitorUpdateFailed as u32);
self.channel_state &= !(ChannelState::MonitorUpdateFailed as u32);
if self.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
self.monitor_pending_revoke_and_ack = false;
self.monitor_pending_commitment_signed = false;
- return (None, None, RAACommitmentOrder::RevokeAndACKFirst, forwards, failures, needs_broadcast_safe, funding_locked);
+ return (None, None, RAACommitmentOrder::RevokeAndACKFirst, None, forwards, failures, Vec::new(), needs_broadcast_safe, funding_locked);
}
let raa = if self.monitor_pending_revoke_and_ack {
Some(self.get_last_revoke_and_ack())
} else { None };
- let commitment_update = if self.monitor_pending_commitment_signed {
+ let mut commitment_update = if self.monitor_pending_commitment_signed {
Some(self.get_last_commitment_update(logger))
} else { None };
+ let mut order = self.resend_order.clone();
self.monitor_pending_revoke_and_ack = false;
self.monitor_pending_commitment_signed = false;
- let order = self.resend_order.clone();
+
+ let mut htlcs_failed_to_forward = Vec::new();
+ let mut chanmon_update = None;
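+ // With the monitor no longer failed, if we have no commitment update to re-send and are
+ // not waiting on a counterparty RAA, this is a safe point to try to free any HTLCs
+ // sitting in the holding cell. Doing so may generate a fresh commitment update (plus a
+ // monitor update the caller must apply first). Since such a commitment update is new
+ // rather than a re-send, any RAA we owe must be sent before it.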
+ if commitment_update.is_none() && self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32) == 0 {
+ order = RAACommitmentOrder::RevokeAndACKFirst;
+
+ let (update_opt, mut failed_htlcs) = self.free_holding_cell_htlcs(logger).unwrap();
+ htlcs_failed_to_forward.append(&mut failed_htlcs);
+ if let Some((com_update, mon_update)) = update_opt {
+ commitment_update = Some(com_update);
+ chanmon_update = Some(mon_update);
+ }
+ }
+
log_trace!(logger, "Restored monitor updating resulting in {}{} commitment update and {} RAA, with {} first",
if needs_broadcast_safe { "a funding broadcast safe, " } else { "" },
if commitment_update.is_some() { "a" } else { "no" },
if raa.is_some() { "an" } else { "no" },
match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
- (raa, commitment_update, order, forwards, failures, needs_broadcast_safe, funding_locked)
+ (raa, commitment_update, order, chanmon_update, forwards, failures, htlcs_failed_to_forward, needs_broadcast_safe, funding_locked)
}
pub fn update_fee<F: Deref>(&mut self, fee_estimator: &F, msg: &msgs::UpdateFee) -> Result<(), ChannelError>
}
macro_rules! handle_chan_restoration_locked {
- ($self: expr, $channel_lock: expr, $channel_state: expr, $channel_entry: expr,
- $raa: expr, $commitment_update: expr, $order: expr,
+ ($self: ident, $channel_lock: expr, $channel_state: expr, $channel_entry: expr,
+ $raa: expr, $commitment_update: expr, $order: expr, $chanmon_update: expr,
$pending_forwards: expr, $broadcast_safe: expr, $funding_locked: expr) => { {
let mut htlc_forwards = None;
let mut funding_broadcast_safe = None;
let counterparty_node_id = $channel_entry.get().get_counterparty_node_id();
+ let channel_id = $channel_entry.get().channel_id();
- {
+ let res = loop {
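+ // The loop is used as a labeled block: failure paths `break` with an Err (via
+ // handle_monitor_err!), the happy path `break`s Ok(()) at the bottom, and the result
+ // is surfaced to the user by post_handle_chan_restoration!.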
if !$pending_forwards.is_empty() {
htlc_forwards = Some(($channel_entry.get().get_short_channel_id().expect("We can't have pending forwards before funding confirmation"),
$channel_entry.get().get_funding_txo().unwrap(), $pending_forwards));
}
macro_rules! handle_cs { () => {
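+ // A monitor update here can only have been generated by freeing the holding cell,
+ // so it must be applied before the commitment update it accompanies is sent; if
+ // applying it fails, fall back to the usual monitor-error handling.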
+ if let Some(monitor_update) = $chanmon_update {
+ assert!($order == RAACommitmentOrder::RevokeAndACKFirst);
+ assert!(!$broadcast_safe);
+ assert!($funding_locked.is_none());
+ assert!($commitment_update.is_some());
+ if let Err(e) = $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) {
+ break handle_monitor_err!($self, e, $channel_state, $channel_entry, RAACommitmentOrder::CommitmentFirst, false, true);
+ }
+ }
if let Some(update) = $commitment_update {
$channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
node_id: counterparty_node_id,
updates: update,
});
}
- $channel_state.short_to_id.insert($channel_entry.get().get_short_channel_id().unwrap(), $channel_entry.get().channel_id());
+ $channel_state.short_to_id.insert($channel_entry.get().get_short_channel_id().unwrap(), channel_id);
}
- }
- (htlc_forwards, funding_broadcast_safe)
+ break Ok(());
+ };
+
+ (htlc_forwards, funding_broadcast_safe, res, channel_id, counterparty_node_id)
} }
}
macro_rules! post_handle_chan_restoration {
- ($self: expr, $locked_res: expr, $pending_failures: expr) => { {
- let (htlc_forwards, funding_broadcast_safe) = $locked_res;
+ ($self: ident, $locked_res: expr, $pending_failures: expr, $forwarding_failures: expr) => { {
+ let (htlc_forwards, funding_broadcast_safe, res, channel_id, counterparty_node_id) = $locked_res;
+
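+ // handle_error! must not be called with the channel_state lock held, so any error
+ // from the locked restoration phase is only surfaced here, after the lock is dropped.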
+ let _ = handle_error!($self, res, counterparty_node_id);
if let Some(ev) = funding_broadcast_safe {
$self.pending_events.lock().unwrap().push(ev);
}
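+ // HTLCs which could not be freed from the holding cell are failed backwards to their
+ // origin here, now that the channel_state lock is no longer held.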
+ $self.fail_holding_cell_htlcs($forwarding_failures, channel_id);
for failure in $pending_failures.drain(..) {
$self.fail_htlc_backwards_internal($self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
}
/// ChannelMonitorUpdateErr::TemporaryFailures is fine. The highest_applied_update_id field
/// exists largely only to prevent races between this and concurrent update_monitor calls.
///
+ /// In some cases, this may generate a monitor update, resulting in a call to the
+ /// `chain::Watch`'s `update_channel` method for the same channel monitor which is being
+ /// notified of a successful update here. Because of this, please be very careful with
+ /// reentrancy bugs! It is incredibly easy to write an implementation of `update_channel` which
+ /// will take a lock that is also held when calling this method.
+ ///
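+ /// For example, a minimal sketch of the hazard (`MyWatcher`, `persistence_lock`, and
+ /// `persist_update` are hypothetical, not part of this API):
+ ///
+ /// ```ignore
+ /// fn update_channel(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate)
+ /// -> Result<(), ChannelMonitorUpdateErr> {
+ ///     // Takes the same lock the caller may hold around channel_monitor_updated:
+ ///     let _guard = self.persistence_lock.lock().unwrap();
+ ///     self.persist_update(funding_txo, update)
+ /// }
+ ///
+ /// // Elsewhere: deadlocks, since channel_monitor_updated may re-enter update_channel
+ /// // while persistence_lock is still held.
+ /// let _guard = my_watcher.persistence_lock.lock().unwrap();
+ /// channel_manager.channel_monitor_updated(&funding_txo, update_id);
+ /// ```
+ ///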
/// Thus, the anticipated use is, at a high level:
/// 1) You register a chain::Watch with this ChannelManager,
/// 2) it stores each update to disk, and begins updating any remote (eg watchtower) copies of
pub fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) {
let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
- let (mut pending_failures, chan_restoration_res) = {
+ let (mut pending_failures, forwarding_failures, chan_restoration_res) = {
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
let mut channel = match channel_state.by_id.entry(funding_txo.to_channel_id()) {
return;
}
- let (raa, commitment_update, order, pending_forwards, pending_failures, needs_broadcast_safe, funding_locked) = channel.get_mut().monitor_updating_restored(&self.logger);
- (pending_failures, handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, raa, commitment_update, order, pending_forwards, needs_broadcast_safe, funding_locked))
+ let (raa, commitment_update, order, chanmon_update, pending_forwards, pending_failures, forwarding_failures, needs_broadcast_safe, funding_locked) = channel.get_mut().monitor_updating_restored(&self.logger);
+ (pending_failures, forwarding_failures, handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, raa, commitment_update, order, chanmon_update, pending_forwards, needs_broadcast_safe, funding_locked))
};
- post_handle_chan_restoration!(self, chan_restoration_res, pending_failures);
+ post_handle_chan_restoration!(self, chan_restoration_res, pending_failures, forwarding_failures);
}
fn internal_open_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {