DuplicateClaim {},
}
+/// The return value of `revoke_and_ack` on success, primarily updates to other channels or HTLC
+/// state.
+pub(super) struct RAAUpdates {
+ pub commitment_update: Option<msgs::CommitmentUpdate>,
+ pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
+ pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
+ pub monitor_update: ChannelMonitorUpdate,
+ pub holding_cell_failed_htlcs: Vec<(HTLCSource, PaymentHash)>,
+}
+
/// If the majority of the channels funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
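
The practical effect of the new struct is at call sites: positional tuple destructuring gives way to named field access. A minimal sketch of a hypothetical caller, inside a function returning `Result<_, ChannelError>`; the names `channel`, `chain_monitor`, `funding_txo`, `msg`, and `logger` are illustrative stand-ins for what the real ChannelManager call site (later in this patch) has in scope:

    // Illustrative only: consume RAAUpdates by field name rather than tuple position.
    let raa_updates = channel.revoke_and_ack(&msg, &logger)?;
    // Persist the monitor update before acting on anything else.
    if let Err(e) = chain_monitor.update_channel(funding_txo, raa_updates.monitor_update) {
        // handle the persistence failure before relaying any messages
    }
    if let Some(updates) = raa_updates.commitment_update {
        // relay `updates` (a msgs::CommitmentUpdate) to the counterparty
    }
    // raa_updates.accepted_htlcs are forwarded onwards; raa_updates.failed_htlcs
    // and raa_updates.holding_cell_failed_htlcs are failed backwards.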
@@ ... @@
/// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
/// generating an appropriate error *after* the channel state has been updated based on the
/// revoke_and_ack message.
- pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Option<msgs::CommitmentUpdate>, Vec<(PendingHTLCInfo, u64)>, Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, ChannelMonitorUpdate, Vec<(HTLCSource, PaymentHash)>), ChannelError>
+ pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<RAAUpdates, ChannelError>
where L::Target: Logger,
{
if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
@@ ... @@
self.monitor_pending_forwards.append(&mut to_forward_infos);
self.monitor_pending_failures.append(&mut revoked_htlcs);
log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.channel_id()));
- return Ok((None, Vec::new(), Vec::new(), monitor_update, Vec::new()))
+ return Ok(RAAUpdates {
+ commitment_update: None,
+ accepted_htlcs: Vec::new(), failed_htlcs: Vec::new(),
+ monitor_update,
+ holding_cell_failed_htlcs: Vec::new()
+ });
}
match self.free_holding_cell_htlcs(logger)? {
@@ ... @@
self.latest_monitor_update_id = monitor_update.update_id;
monitor_update.updates.append(&mut additional_update.updates);
- Ok((Some(commitment_update), to_forward_infos, revoked_htlcs, monitor_update, htlcs_to_fail))
+ Ok(RAAUpdates {
+ commitment_update: Some(commitment_update),
+ accepted_htlcs: to_forward_infos,
+ failed_htlcs: revoked_htlcs,
+ monitor_update,
+ holding_cell_failed_htlcs: htlcs_to_fail
+ })
},
(None, htlcs_to_fail) => {
if require_commitment {
log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.",
log_bytes!(self.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len());
- Ok((Some(msgs::CommitmentUpdate {
- update_add_htlcs: Vec::new(),
- update_fulfill_htlcs: Vec::new(),
- update_fail_htlcs,
- update_fail_malformed_htlcs,
- update_fee: None,
- commitment_signed
- }), to_forward_infos, revoked_htlcs, monitor_update, htlcs_to_fail))
+ Ok(RAAUpdates {
+ commitment_update: Some(msgs::CommitmentUpdate {
+ update_add_htlcs: Vec::new(),
+ update_fulfill_htlcs: Vec::new(),
+ update_fail_htlcs,
+ update_fail_malformed_htlcs,
+ update_fee: None,
+ commitment_signed
+ }),
+ accepted_htlcs: to_forward_infos, failed_htlcs: revoked_htlcs,
+ monitor_update, holding_cell_failed_htlcs: htlcs_to_fail
+ })
} else {
log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.channel_id()));
- Ok((None, to_forward_infos, revoked_htlcs, monitor_update, htlcs_to_fail))
+ Ok(RAAUpdates {
+ commitment_update: None,
+ accepted_htlcs: to_forward_infos, failed_htlcs: revoked_htlcs,
+ monitor_update, holding_cell_failed_htlcs: htlcs_to_fail
+ })
}
}
}
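
The doc comment on `revoke_and_ack` warrants emphasis before moving to the ChannelManager side: an `Err` can be generated *after* the channel state has already been updated, so a caller cannot treat failure as a no-op or a cue to retry. A rough sketch of the resulting contract, with the same illustrative names as above:

    // Illustrative only: the error path must assume state has advanced.
    match channel.revoke_and_ack(&msg, &logger) {
        Ok(raa_updates) => {
            // persist raa_updates.monitor_update, relay raa_updates.commitment_update,
            // then process the accepted/failed HTLC lists
        },
        Err(e) => {
            // Channel state may already reflect the revoke_and_ack, so do not
            // retry the message; map `e` into a channel-failure path, as
            // break_chan_entry! does in the next hunk.
        },
    }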
@@ ... @@
break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
- let (commitment_update, pending_forwards, pending_failures, monitor_update, htlcs_to_fail_in) =
- break_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), channel_state, chan);
- htlcs_to_fail = htlcs_to_fail_in;
- if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
+ let raa_updates = break_chan_entry!(self,
+ chan.get_mut().revoke_and_ack(&msg, &self.logger), channel_state, chan);
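+ // Stash the holding-cell failures in the outer `htlcs_to_fail` so they can be
+ // failed backwards once the channel lock has been released.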
+ htlcs_to_fail = raa_updates.holding_cell_failed_htlcs;
+ if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), raa_updates.monitor_update) {
if was_frozen_for_monitor {
- assert!(commitment_update.is_none() && pending_forwards.is_empty() && pending_failures.is_empty());
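+ // If the channel was already frozen for a monitor update, revoke_and_ack
+ // returned early (see above) without producing any updates to act on.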
+ assert!(raa_updates.commitment_update.is_none());
+ assert!(raa_updates.accepted_htlcs.is_empty());
+ assert!(raa_updates.failed_htlcs.is_empty());
break Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA".to_owned()));
} else {
- if let Err(e) = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, commitment_update.is_some(), pending_forwards, pending_failures) {
+ if let Err(e) = handle_monitor_err!(self, e, channel_state, chan,
+ RAACommitmentOrder::CommitmentFirst, false,
+ raa_updates.commitment_update.is_some(),
+ raa_updates.accepted_htlcs, raa_updates.failed_htlcs) {
break Err(e);
} else { unreachable!(); }
}
}
- if let Some(updates) = commitment_update {
+ if let Some(updates) = raa_updates.commitment_update {
channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
node_id: counterparty_node_id.clone(),
updates,
});
}
- break Ok((pending_forwards, pending_failures, chan.get().get_short_channel_id().expect("RAA should only work on a short-id-available channel"), chan.get().get_funding_txo().unwrap()))
+ break Ok((raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
+ chan.get().get_short_channel_id()
+ .expect("RAA should only work on a short-id-available channel"),
+ chan.get().get_funding_txo().unwrap()))
},
hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}