},
}
+/// A struct capturing the set of things we may need to do after a reconnect or after monitor
+/// updating has been restored.
pub(super) struct ChannelRestoredUpdates {
+ /// A `revoke_and_ack` message which should be sent to the counterparty, if any.
+ pub(super) revoke_and_ack: Option<msgs::RevokeAndACK>,
+ /// A `commitment_update` (commitment_signed plus any accompanying HTLC update messages)
+ /// which should be sent to the counterparty, if any.
+ pub(super) commitment_update: Option<msgs::CommitmentUpdate>,
+ /// The order in which `revoke_and_ack` and `commitment_update` must be sent when both are set.
+ pub(super) raa_commitment_order: RAACommitmentOrder,
+ /// A `ChannelMonitorUpdate` which must be applied via the chain monitor before
+ /// `commitment_update` is sent (when this is `Some`, `commitment_update` is also `Some`).
+ pub(super) chanmon_update: Option<ChannelMonitorUpdate>,
+ /// HTLC forwards (with the incoming HTLC id) drained from the channel's monitor-pending
+ /// queue which should now be processed.
+ pub(super) pending_forwards: Vec<(PendingHTLCInfo, u64)>,
+ /// HTLC failures drained from the channel's monitor-pending queue which should now be
+ /// failed backwards.
+ pub(super) pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
+ /// Holding-cell HTLCs which could not be freed and should be failed backwards.
+ pub(super) forwarding_failures: Vec<(HTLCSource, PaymentHash)>,
+ /// If true, a `FundingBroadcastSafe` event should be generated for the channel.
+ pub(super) needs_broadcast_safe: bool,
+ /// A `funding_locked` message which should be sent to the counterparty, if any.
+ pub(super) funding_locked: Option<msgs::FundingLocked>,
+ /// A `shutdown` message which should be sent to the counterparty, if any
+ /// (only populated in the `channel_reestablish` path in the visible code).
+ pub(super) shutdown: Option<msgs::Shutdown>,
+}
+
/// There are a few "states" and then a number of flags which can be applied:
/// We first move through init with OurInitSent -> TheirInitSent -> FundingCreated -> FundingSent.
/// TheirFundingLocked and OurFundingLocked then get set on FundingSent, and when both are set we
/// Indicates that the latest ChannelMonitor update has been committed by the client
/// successfully and we should restore normal operation. Returns messages which should be sent
/// to the remote side.
- pub fn monitor_updating_restored<L: Deref>(&mut self, logger: &L) -> (
- Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, RAACommitmentOrder, Option<ChannelMonitorUpdate>,
- Vec<(PendingHTLCInfo, u64)>, Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, Vec<(HTLCSource, PaymentHash)>,
- bool, Option<msgs::FundingLocked>) where L::Target: Logger {
+ pub fn monitor_updating_restored<L: Deref>(&mut self, logger: &L) -> ChannelRestoredUpdates where L::Target: Logger {
assert_eq!(self.channel_state & ChannelState::MonitorUpdateFailed as u32, ChannelState::MonitorUpdateFailed as u32);
self.channel_state &= !(ChannelState::MonitorUpdateFailed as u32);
})
} else { None };
- let mut forwards = Vec::new();
- mem::swap(&mut forwards, &mut self.monitor_pending_forwards);
- let mut failures = Vec::new();
- mem::swap(&mut failures, &mut self.monitor_pending_failures);
+ let mut pending_forwards = Vec::new();
+ mem::swap(&mut pending_forwards, &mut self.monitor_pending_forwards);
+ let mut pending_failures = Vec::new();
+ mem::swap(&mut pending_failures, &mut self.monitor_pending_failures);
if self.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
self.monitor_pending_revoke_and_ack = false;
self.monitor_pending_commitment_signed = false;
- return (None, None, RAACommitmentOrder::RevokeAndACKFirst, None, forwards, failures, Vec::new(), needs_broadcast_safe, funding_locked);
+ return ChannelRestoredUpdates {
+ revoke_and_ack: None,
+ commitment_update: None,
+ raa_commitment_order: RAACommitmentOrder::RevokeAndACKFirst,
+ chanmon_update: None,
+ pending_forwards,
+ pending_failures,
+ forwarding_failures: Vec::new(),
+ needs_broadcast_safe,
+ funding_locked,
+ shutdown: None,
+ }
}
- let raa = if self.monitor_pending_revoke_and_ack {
+ let revoke_and_ack = if self.monitor_pending_revoke_and_ack {
Some(self.get_last_revoke_and_ack())
} else { None };
let mut commitment_update = if self.monitor_pending_commitment_signed {
Some(self.get_last_commitment_update(logger))
} else { None };
- let mut order = self.resend_order.clone();
+ let mut raa_commitment_order = self.resend_order.clone();
self.monitor_pending_revoke_and_ack = false;
self.monitor_pending_commitment_signed = false;
- let mut htlcs_failed_to_forward = Vec::new();
+ let mut forwarding_failures = Vec::new();
let mut chanmon_update = None;
if commitment_update.is_none() && self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32) == 0 {
- order = RAACommitmentOrder::RevokeAndACKFirst;
+ raa_commitment_order = RAACommitmentOrder::RevokeAndACKFirst;
let (update_opt, mut failed_htlcs) = self.free_holding_cell_htlcs(logger).unwrap();
- htlcs_failed_to_forward.append(&mut failed_htlcs);
+ forwarding_failures.append(&mut failed_htlcs);
if let Some((com_update, mon_update)) = update_opt {
commitment_update = Some(com_update);
chanmon_update = Some(mon_update);
log_trace!(logger, "Restored monitor updating resulting in {}{} commitment update and {} RAA, with {} first",
if needs_broadcast_safe { "a funding broadcast safe, " } else { "" },
if commitment_update.is_some() { "a" } else { "no" },
- if raa.is_some() { "an" } else { "no" },
- match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
- (raa, commitment_update, order, chanmon_update, forwards, failures, htlcs_failed_to_forward, needs_broadcast_safe, funding_locked)
+ if revoke_and_ack.is_some() { "an" } else { "no" },
+ match raa_commitment_order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
+
+ ChannelRestoredUpdates {
+ revoke_and_ack,
+ commitment_update,
+ raa_commitment_order,
+ chanmon_update,
+ pending_forwards,
+ pending_failures,
+ forwarding_failures,
+ needs_broadcast_safe,
+ funding_locked,
+ shutdown: None,
+ }
}
pub fn update_fee<F: Deref>(&mut self, fee_estimator: &F, msg: &msgs::UpdateFee) -> Result<(), ChannelError>
/// May panic if some calls other than message-handling calls (which will all Err immediately)
/// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
- pub fn channel_reestablish<L: Deref>(&mut self, msg: &msgs::ChannelReestablish, logger: &L) -> Result<(Option<msgs::FundingLocked>, Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, Option<ChannelMonitorUpdate>, RAACommitmentOrder, Vec<(HTLCSource, PaymentHash)>, Option<msgs::Shutdown>), ChannelError> where L::Target: Logger {
+ pub fn channel_reestablish<L: Deref>(&mut self, msg: &msgs::ChannelReestablish, logger: &L) -> Result<ChannelRestoredUpdates, ChannelError> where L::Target: Logger {
if self.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
// While BOLT 2 doesn't indicate explicitly we should error this channel here, it
// almost certainly indicates we are going to end up out-of-sync in some way, so we
return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent funding_locked yet".to_owned()));
}
// Short circuit the whole handler as there is nothing we can resend them
- return Ok((None, None, None, None, RAACommitmentOrder::CommitmentFirst, Vec::new(), shutdown_msg));
+ return Ok(ChannelRestoredUpdates {
+ revoke_and_ack: None,
+ commitment_update: None,
+ raa_commitment_order: RAACommitmentOrder::CommitmentFirst,
+ chanmon_update: None,
+ pending_forwards: Vec::new(),
+ pending_failures: Vec::new(),
+ forwarding_failures: Vec::new(),
+ needs_broadcast_safe: false,
+ funding_locked: None,
+ shutdown: shutdown_msg,
+ });
}
// We have OurFundingLocked set!
let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
- return Ok((Some(msgs::FundingLocked {
- channel_id: self.channel_id(),
- next_per_commitment_point,
- }), None, None, None, RAACommitmentOrder::CommitmentFirst, Vec::new(), shutdown_msg));
+
+ return Ok(ChannelRestoredUpdates {
+ revoke_and_ack: None,
+ commitment_update: None,
+ raa_commitment_order: RAACommitmentOrder::CommitmentFirst,
+ chanmon_update: None,
+ pending_forwards: Vec::new(),
+ pending_failures: Vec::new(),
+ forwarding_failures: Vec::new(),
+ needs_broadcast_safe: false,
+ funding_locked: Some(msgs::FundingLocked {
+ channel_id: self.channel_id(),
+ next_per_commitment_point,
+ }),
+ shutdown: shutdown_msg,
+ });
}
let required_revoke = if msg.next_remote_commitment_number + 1 == INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number {
Err(ChannelError::Close(msg)) => return Err(ChannelError::Close(msg)),
Err(ChannelError::Ignore(_)) | Err(ChannelError::CloseDelayBroadcast(_)) => panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
Ok((Some((commitment_update, monitor_update)), htlcs_to_fail)) => {
- return Ok((resend_funding_locked, required_revoke, Some(commitment_update), Some(monitor_update), self.resend_order.clone(), htlcs_to_fail, shutdown_msg));
+ return Ok(ChannelRestoredUpdates {
+ revoke_and_ack: required_revoke,
+ commitment_update: Some(commitment_update),
+ raa_commitment_order: self.resend_order.clone(),
+ chanmon_update: Some(monitor_update),
+ pending_forwards: Vec::new(),
+ pending_failures: Vec::new(),
+ forwarding_failures: htlcs_to_fail,
+ needs_broadcast_safe: false,
+ funding_locked: resend_funding_locked,
+ shutdown: shutdown_msg,
+ });
},
Ok((None, htlcs_to_fail)) => {
- return Ok((resend_funding_locked, required_revoke, None, None, self.resend_order.clone(), htlcs_to_fail, shutdown_msg));
+ return Ok(ChannelRestoredUpdates {
+ revoke_and_ack: required_revoke,
+ commitment_update: None,
+ raa_commitment_order: self.resend_order.clone(),
+ chanmon_update: None,
+ pending_forwards: Vec::new(),
+ pending_failures: Vec::new(),
+ forwarding_failures: htlcs_to_fail,
+ needs_broadcast_safe: false,
+ funding_locked: resend_funding_locked,
+ shutdown: shutdown_msg,
+ });
},
}
} else {
- return Ok((resend_funding_locked, required_revoke, None, None, self.resend_order.clone(), Vec::new(), shutdown_msg));
+ return Ok(ChannelRestoredUpdates {
+ revoke_and_ack: required_revoke,
+ commitment_update: None,
+ raa_commitment_order: self.resend_order.clone(),
+ chanmon_update: None,
+ pending_forwards: Vec::new(),
+ pending_failures: Vec::new(),
+ forwarding_failures: Vec::new(),
+ needs_broadcast_safe: false,
+ funding_locked: resend_funding_locked,
+ shutdown: shutdown_msg,
+ });
}
} else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
if required_revoke.is_some() {
if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) != 0 {
self.monitor_pending_commitment_signed = true;
- return Ok((resend_funding_locked, None, None, None, self.resend_order.clone(), Vec::new(), shutdown_msg));
+ return Ok(ChannelRestoredUpdates {
+ revoke_and_ack: None,
+ commitment_update: None,
+ raa_commitment_order: self.resend_order.clone(),
+ chanmon_update: None,
+ pending_forwards: Vec::new(),
+ pending_failures: Vec::new(),
+ forwarding_failures: Vec::new(),
+ needs_broadcast_safe: false,
+ funding_locked: resend_funding_locked,
+ shutdown: shutdown_msg,
+ });
}
- return Ok((resend_funding_locked, required_revoke, Some(self.get_last_commitment_update(logger)), None, self.resend_order.clone(), Vec::new(), shutdown_msg));
+ return Ok(ChannelRestoredUpdates {
+ revoke_and_ack: required_revoke,
+ commitment_update: Some(self.get_last_commitment_update(logger)),
+ raa_commitment_order: self.resend_order.clone(),
+ chanmon_update: None,
+ pending_forwards: Vec::new(),
+ pending_failures: Vec::new(),
+ forwarding_failures: Vec::new(),
+ needs_broadcast_safe: false,
+ funding_locked: resend_funding_locked,
+ shutdown: shutdown_msg,
+ });
} else {
return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction".to_owned()));
}
}
macro_rules! handle_chan_restoration_locked {
- ($self: ident, $channel_lock: expr, $channel_state: expr, $channel_entry: expr,
- $raa: expr, $commitment_update: expr, $order: expr, $chanmon_update: expr,
- $pending_forwards: expr, $broadcast_safe: expr, $funding_locked: expr) => { {
+ ($self: ident, $channel_lock: expr, $channel_state: expr, $channel_entry: expr, $updates: expr) => { {
let mut htlc_forwards = None;
let mut funding_broadcast_safe = None;
let counterparty_node_id = $channel_entry.get().get_counterparty_node_id();
let channel_id = $channel_entry.get().channel_id();
let res = loop {
- let forwards: Vec<(PendingHTLCInfo, u64)> = $pending_forwards; // Force type-checking to resolve
+ if let Some(msg) = $updates.shutdown {
+ $channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+ node_id: counterparty_node_id.clone(),
+ msg,
+ });
+ }
+
+ let forwards: Vec<(PendingHTLCInfo, u64)> = $updates.pending_forwards; // Force type-checking to resolve
if !forwards.is_empty() {
htlc_forwards = Some(($channel_entry.get().get_short_channel_id().expect("We can't have pending forwards before funding confirmation"),
$channel_entry.get().get_funding_txo().unwrap(), forwards));
}
- if $chanmon_update.is_some() {
- assert!($commitment_update.is_some());
- assert!($funding_locked.is_none());
+ if $updates.chanmon_update.is_some() {
+ assert!($updates.commitment_update.is_some());
+ assert!($updates.funding_locked.is_none());
}
- if let Some(msg) = $funding_locked {
+ if let Some(msg) = $updates.funding_locked {
$channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
node_id: counterparty_node_id,
msg,
}
macro_rules! handle_cs { () => {
- if let Some(monitor_update) = $chanmon_update {
- assert!($order == RAACommitmentOrder::RevokeAndACKFirst);
- assert!(!$broadcast_safe);
- assert!($commitment_update.is_some());
+ if let Some(monitor_update) = $updates.chanmon_update {
+ assert!($updates.raa_commitment_order == RAACommitmentOrder::RevokeAndACKFirst);
+ assert!(!$updates.needs_broadcast_safe);
+ assert!($updates.commitment_update.is_some());
if let Err(e) = $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) {
break handle_monitor_err!($self, e, $channel_state, $channel_entry, RAACommitmentOrder::CommitmentFirst, false, true);
}
}
- if let Some(update) = $commitment_update {
+ if let Some(update) = $updates.commitment_update {
$channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
node_id: counterparty_node_id,
updates: update,
}
} }
macro_rules! handle_raa { () => {
- if let Some(revoke_and_ack) = $raa {
+ if let Some(revoke_and_ack) = $updates.revoke_and_ack {
$channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
node_id: counterparty_node_id,
msg: revoke_and_ack,
});
}
} }
- match $order {
+ match $updates.raa_commitment_order {
RAACommitmentOrder::CommitmentFirst => {
handle_cs!();
handle_raa!();
handle_cs!();
},
}
- if $broadcast_safe {
+ if $updates.needs_broadcast_safe {
funding_broadcast_safe = Some(events::Event::FundingBroadcastSafe {
funding_txo: $channel_entry.get().get_funding_txo().unwrap(),
user_channel_id: $channel_entry.get().get_user_id(),
break Ok(());
};
- (htlc_forwards, funding_broadcast_safe, res, channel_id, counterparty_node_id)
+ ($updates.pending_failures, $updates.forwarding_failures, htlc_forwards,
+ funding_broadcast_safe, res, channel_id, counterparty_node_id)
} }
}
macro_rules! post_handle_chan_restoration {
- ($self: ident, $locked_res: expr, $pending_failures: expr, $forwarding_failures: expr) => { {
- let (htlc_forwards, funding_broadcast_safe, res, channel_id, counterparty_node_id) = $locked_res;
+ ($self: ident, $locked_res: expr) => { {
+ let (mut pending_failures, forwarding_failures, htlc_forwards, funding_broadcast_safe,
+ res, channel_id, counterparty_node_id) = $locked_res;
let _ = handle_error!($self, res, counterparty_node_id);
$self.pending_events.lock().unwrap().push(ev);
}
- let forwarding_failures: Vec<(HTLCSource, PaymentHash)> = $forwarding_failures; // Force type-checking to resolve
$self.fail_holding_cell_htlcs(forwarding_failures, channel_id);
-
- let mut pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)> = $pending_failures; // Force type-checking to resolve
for failure in pending_failures.drain(..) {
$self.fail_htlc_backwards_internal($self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
}
pub fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) {
let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
- let (pending_failures, forwarding_failures, chan_restoration_res) = {
+ let chan_restoration_res = {
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
let mut channel = match channel_state.by_id.entry(funding_txo.to_channel_id()) {
return;
}
- let (raa, commitment_update, order, chanmon_update, pending_forwards, pending_failures, forwarding_failures, needs_broadcast_safe, funding_locked) = channel.get_mut().monitor_updating_restored(&self.logger);
- (pending_failures, forwarding_failures, handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, raa, commitment_update, order, chanmon_update, pending_forwards, needs_broadcast_safe, funding_locked))
+ let updates = channel.get_mut().monitor_updating_restored(&self.logger);
+ handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, updates)
};
- post_handle_chan_restoration!(self, chan_restoration_res, pending_failures, forwarding_failures);
+ post_handle_chan_restoration!(self, chan_restoration_res);
}
fn internal_open_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
}
fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
- let (htlcs_failed_forward, chan_restoration_res) = {
+ let chan_restoration_res = {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
// disconnect, so Channel's reestablish will never hand us any holding cell
// freed HTLCs to fail backwards. If in the future we no longer drop pending
// add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
- let (funding_locked, revoke_and_ack, commitment_update, monitor_update_opt, order, htlcs_failed_forward, shutdown) =
- try_chan_entry!(self, chan.get_mut().channel_reestablish(msg, &self.logger), channel_state, chan);
- if let Some(msg) = shutdown {
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
- node_id: counterparty_node_id.clone(),
- msg,
- });
- }
- (htlcs_failed_forward, handle_chan_restoration_locked!(self, channel_state_lock, channel_state, chan, revoke_and_ack, commitment_update, order, monitor_update_opt, Vec::new(), false, funding_locked))
+ let updates = try_chan_entry!(self, chan.get_mut().channel_reestablish(msg, &self.logger), channel_state, chan);
+ handle_chan_restoration_locked!(self, channel_state_lock, channel_state, chan, updates)
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
};
- post_handle_chan_restoration!(self, chan_restoration_res, Vec::new(), htlcs_failed_forward);
+ post_handle_chan_restoration!(self, chan_restoration_res);
Ok(())
}