pub raa: Option<msgs::RevokeAndACK>,
pub commitment_update: Option<msgs::CommitmentUpdate>,
pub order: RAACommitmentOrder,
- pub mon_update: Option<ChannelMonitorUpdate>,
- pub holding_cell_failed_htlcs: Vec<(HTLCSource, PaymentHash)>,
pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
pub shutdown_msg: Option<msgs::Shutdown>,
}
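// Illustrative sketch (not part of this patch): a toy model of how a consumer
// of ReestablishResponses is expected to honor `order` when replaying a
// revoke_and_ack and a commitment update after reconnect. `ToyMsg` and
// `replay_order` are hypothetical stand-ins; only RAACommitmentOrder and its
// two variants come from the real code.
#[derive(Debug, PartialEq)]
enum ToyMsg { Raa, Commitment }
fn replay_order(raa: Option<ToyMsg>, cu: Option<ToyMsg>, order: RAACommitmentOrder) -> Vec<ToyMsg> {
	let mut out = Vec::new();
	match order {
		// Send our revoke_and_ack first, then any commitment update.
		RAACommitmentOrder::RevokeAndACKFirst => { out.extend(raa); out.extend(cu); }
		// Send our commitment update first, then any revoke_and_ack.
		RAACommitmentOrder::CommitmentFirst => { out.extend(cu); out.extend(raa); }
	}
	out
}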
// Short circuit the whole handler as there is nothing we can resend to them
return Ok(ReestablishResponses {
channel_ready: None,
- raa: None, commitment_update: None, mon_update: None,
+ raa: None, commitment_update: None,
order: RAACommitmentOrder::CommitmentFirst,
- holding_cell_failed_htlcs: Vec::new(),
shutdown_msg, announcement_sigs,
});
}
next_per_commitment_point,
short_channel_id_alias: Some(self.outbound_scid_alias),
}),
- raa: None, commitment_update: None, mon_update: None,
+ raa: None, commitment_update: None,
order: RAACommitmentOrder::CommitmentFirst,
- holding_cell_failed_htlcs: Vec::new(),
shutdown_msg, announcement_sigs,
});
}
log_debug!(logger, "Reconnected channel {} with no loss", log_bytes!(self.channel_id()));
}
- if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
- // We're up-to-date and not waiting on a remote revoke (if we are our
- // channel_reestablish should result in them sending a revoke_and_ack), but we may
- // have received some updates while we were disconnected. Free the holding cell
- // now!
- match self.free_holding_cell_htlcs(logger) {
- Err(ChannelError::Close(msg)) => Err(ChannelError::Close(msg)),
- Err(ChannelError::Warn(_)) | Err(ChannelError::Ignore(_)) =>
- panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
- Ok((Some((commitment_update, monitor_update)), holding_cell_failed_htlcs)) => {
- Ok(ReestablishResponses {
- channel_ready, shutdown_msg, announcement_sigs,
- raa: required_revoke,
- commitment_update: Some(commitment_update),
- order: self.resend_order.clone(),
- mon_update: Some(monitor_update),
- holding_cell_failed_htlcs,
- })
- },
- Ok((None, holding_cell_failed_htlcs)) => {
- Ok(ReestablishResponses {
- channel_ready, shutdown_msg, announcement_sigs,
- raa: required_revoke,
- commitment_update: None,
- order: self.resend_order.clone(),
- mon_update: None,
- holding_cell_failed_htlcs,
- })
- },
- }
- } else {
- Ok(ReestablishResponses {
- channel_ready, shutdown_msg, announcement_sigs,
- raa: required_revoke,
- commitment_update: None,
- order: self.resend_order.clone(),
- mon_update: None,
- holding_cell_failed_htlcs: Vec::new(),
- })
- }
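+ // The holding cell is deliberately left untouched on this path; any updates
+ // that accumulated while we were disconnected are presumably freed later via
+ // the usual free_holding_cell_htlcs flow rather than inline here.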
+ Ok(ReestablishResponses {
+ channel_ready, shutdown_msg, announcement_sigs,
+ raa: required_revoke,
+ commitment_update: None,
+ order: self.resend_order.clone(),
+ })
} else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
if required_revoke.is_some() {
log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", log_bytes!(self.channel_id()));
self.monitor_pending_commitment_signed = true;
Ok(ReestablishResponses {
channel_ready, shutdown_msg, announcement_sigs,
- commitment_update: None, raa: None, mon_update: None,
+ commitment_update: None, raa: None,
order: self.resend_order.clone(),
- holding_cell_failed_htlcs: Vec::new(),
})
} else {
Ok(ReestablishResponses {
channel_ready, shutdown_msg, announcement_sigs,
raa: required_revoke,
commitment_update: Some(self.get_last_commitment_update(logger)),
order: self.resend_order.clone(),
- mon_update: None,
- holding_cell_failed_htlcs: Vec::new(),
})
}
} else {
}
macro_rules! handle_chan_restoration_locked {
- ($self: ident, $channel_lock: expr, $channel_state: expr, $channel_entry: expr,
- $raa: expr, $commitment_update: expr, $order: expr, $chanmon_update: expr,
+ ($self: ident, $channel_state: expr, $channel_entry: expr,
+ $raa: expr, $commitment_update: expr, $order: expr,
$pending_forwards: expr, $funding_broadcastable: expr, $channel_ready: expr, $announcement_sigs: expr) => { {
let mut htlc_forwards = None;
- let chanmon_update: Option<ChannelMonitorUpdate> = $chanmon_update; // Force type-checking to resolve
- let chanmon_update_is_none = chanmon_update.is_none();
let counterparty_node_id = $channel_entry.get().get_counterparty_node_id();
let res = loop {
let forwards: Vec<(PendingHTLCInfo, u64)> = $pending_forwards; // Force type-checking to resolve
$channel_entry.get().get_funding_txo().unwrap(), forwards));
}
- if chanmon_update.is_some() {
- // On reconnect, we, by definition, only resend a channel_ready if there have been
- // no commitment updates, so the only channel monitor update which could also be
- // associated with a channel_ready would be the funding_created/funding_signed
- // monitor update. That monitor update failing implies that we won't send
- // channel_ready until it's been updated, so we can't have a channel_ready and a
- // monitor update here (so we don't bother to handle it correctly below).
- assert!($channel_ready.is_none());
- // A channel monitor update makes no sense without either a channel_ready or a
- // commitment update to process after it. Since we can't have a channel_ready, we
- // only bother to handle the monitor-update + commitment_update case below.
- assert!($commitment_update.is_some());
- }
-
if let Some(msg) = $channel_ready {
- // Similar to the above, this implies that we're letting the channel_ready fly
- // before it should be allowed to.
- assert!(chanmon_update.is_none());
send_channel_ready!($self, $channel_state.pending_msg_events, $channel_entry.get(), msg);
}
if let Some(msg) = $announcement_sigs {
emit_channel_ready_event!($self, $channel_entry.get_mut());
- let funding_broadcastable: Option<Transaction> = $funding_broadcastable; // Force type-checking to resolve
- if let Some(monitor_update) = chanmon_update {
- // We only ever broadcast a funding transaction in response to a funding_signed
- // message and the resulting monitor update. Thus, on channel_reestablish
- // message handling we can't have a funding transaction to broadcast. When
- // processing a monitor update finishing resulting in a funding broadcast, we
- // cannot have a second monitor update, thus this case would indicate a bug.
- assert!(funding_broadcastable.is_none());
- // Given we were just reconnected or finished updating a channel monitor, the
- // only case where we can get a new ChannelMonitorUpdate would be if we also
- // have some commitment updates to send as well.
- assert!($commitment_update.is_some());
- match $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) {
- ChannelMonitorUpdateStatus::Completed => {},
- e => {
- // channel_reestablish doesn't guarantee the order it returns is sensical
- // for the messages it returns, but if we're setting what messages to
- // re-transmit on monitor update success, we need to make sure it is sane.
- let mut order = $order;
- if $raa.is_none() {
- order = RAACommitmentOrder::CommitmentFirst;
- }
- break handle_monitor_update_res!($self, e, $channel_entry, order, $raa.is_some(), true);
- }
- }
- }
-
macro_rules! handle_cs { () => {
if let Some(update) = $commitment_update {
$channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
handle_cs!();
},
}
+
+ let funding_broadcastable: Option<Transaction> = $funding_broadcastable; // Force type-checking to resolve
if let Some(tx) = funding_broadcastable {
log_info!($self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
$self.tx_broadcaster.broadcast_transaction(&tx);
break Ok(());
};
- if chanmon_update_is_none {
- // If there was no ChannelMonitorUpdate, we should never generate an Err in the res loop
- // above. Doing so would imply calling handle_err!() from channel_monitor_updated() which
- // should *never* end up calling back to `chain_monitor.update_channel()`.
- assert!(res.is_ok());
- }
-
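+ // With no ChannelMonitorUpdate left to apply, nothing in the `res` loop
+ // above can produce an Err, so `res` is always Ok(()) at this point.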
(htlc_forwards, res, counterparty_node_id)
} }
}
})
} else { None }
} else { None };
- chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, updates.raa, updates.commitment_update, updates.order, None, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs);
+ chan_restoration_res = handle_chan_restoration_locked!(self, channel_state, channel, updates.raa, updates.commitment_update, updates.order, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs);
if let Some(upd) = channel_update {
channel_state.pending_msg_events.push(upd);
}
fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
let chan_restoration_res;
- let (htlcs_failed_forward, need_lnd_workaround) = {
+ let need_lnd_workaround = {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
}
let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
chan_restoration_res = handle_chan_restoration_locked!(
- self, channel_state_lock, channel_state, chan, responses.raa, responses.commitment_update, responses.order,
- responses.mon_update, Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
+ self, channel_state, chan, responses.raa, responses.commitment_update, responses.order,
+ Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
if let Some(upd) = channel_update {
channel_state.pending_msg_events.push(upd);
}
- (responses.holding_cell_failed_htlcs, need_lnd_workaround)
+ need_lnd_workaround
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
};
post_handle_chan_restoration!(self, chan_restoration_res);
- self.fail_holding_cell_htlcs(htlcs_failed_forward, msg.channel_id, counterparty_node_id);
if let Some(channel_ready_msg) = need_lnd_workaround {
self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?;