}
/// The return type of get_update_fulfill_htlc_and_commit.
-pub enum UpdateFulfillCommitFetch<'a> {
+pub enum UpdateFulfillCommitFetch {
/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
/// previously placed in the holding cell (and has since been removed).
NewClaim {
/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
- monitor_update: &'a ChannelMonitorUpdate,
+ monitor_update: ChannelMonitorUpdate,
/// The value of the HTLC which was claimed, in msat.
htlc_value_msat: u64,
},
};
self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
UpdateFulfillCommitFetch::NewClaim {
- monitor_update: &self.context.pending_monitor_updates.get(unblocked_update_pos)
- .expect("We just pushed the monitor update").update,
+ monitor_update: self.context.pending_monitor_updates.get(unblocked_update_pos)
+ .expect("We just pushed the monitor update").update.clone(),
htlc_value_msat,
}
},
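// A minimal illustrative sketch of the pattern this hunk moves to, using invented stand-in
// types (MonitorUpdate, PendingUpdate, FulfillFetch, Chan) rather than the real LDK API: the
// claim path queues the monitor update on the channel and then returns an owned clone inside
// the NewClaim variant, so the caller holds a value instead of a borrow of the channel.
#[derive(Clone)]
struct MonitorUpdate { update_id: u64 }                 // stand-in for ChannelMonitorUpdate

struct PendingUpdate { update: MonitorUpdate }

enum FulfillFetch {                                     // stand-in for UpdateFulfillCommitFetch
    NewClaim { monitor_update: MonitorUpdate, htlc_value_msat: u64 },
}

struct Chan { pending_monitor_updates: Vec<PendingUpdate>, next_update_id: u64 }

impl Chan {
    fn claim_htlc_and_commit(&mut self, htlc_value_msat: u64) -> FulfillFetch {
        // Queue the update on the channel first...
        self.next_update_id += 1;
        self.pending_monitor_updates.push(PendingUpdate {
            update: MonitorUpdate { update_id: self.next_update_id },
        });
        // ...then hand back a clone of the just-pushed update by value.
        FulfillFetch::NewClaim {
            monitor_update: self.pending_monitor_updates.last()
                .expect("We just pushed the monitor update").update.clone(),
            htlc_value_msat,
        }
    }
}

fn main() {
    let mut chan = Chan { pending_monitor_updates: Vec::new(), next_update_id: 0 };
    match chan.claim_htlc_and_commit(5_000) {
        FulfillFetch::NewClaim { monitor_update, htlc_value_msat } => {
            // The returned update is owned, so the channel can be borrowed mutably again at once.
            let _ = chan.claim_htlc_and_commit(1_000);
            println!("claimed {} msat, monitor update {}", htlc_value_msat, monitor_update.update_id);
        }
    }
}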
Ok(())
}
- pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<&ChannelMonitorUpdate>, ChannelError>
+ pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
where L::Target: Logger
{
if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
/// Public version of the below, checking relevant preconditions first.
/// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
/// returns `(None, Vec::new())`.
- pub fn maybe_free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
+ pub fn maybe_free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
if self.context.channel_state >= ChannelState::ChannelReady as u32 &&
(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
self.free_holding_cell_htlcs(logger)
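// A minimal sketch of the precondition check maybe_free_holding_cell_htlcs performs before
// delegating to free_holding_cell_htlcs, using invented state flags and stub types (not the
// real LDK state machine): the holding cell is only freed once the channel is ready and no
// pending revoke, disconnection, or in-flight monitor update forbids building a commitment.
// The return value is simplified here to just the optional monitor update.
struct MonitorUpdate { update_id: u64 }                 // stand-in for ChannelMonitorUpdate

const CHANNEL_READY: u32               = 1 << 0;        // assumed flag layout, for illustration
const AWAITING_REMOTE_REVOKE: u32      = 1 << 1;
const PEER_DISCONNECTED: u32           = 1 << 2;
const MONITOR_UPDATE_IN_PROGRESS: u32  = 1 << 3;

struct Chan {
    channel_state: u32,
    holding_cell_htlc_updates: Vec<&'static str>,       // stand-in for queued HTLC updates
    next_update_id: u64,
}

impl Chan {
    fn maybe_free_holding_cell_htlcs(&mut self) -> Option<MonitorUpdate> {
        if (self.channel_state & CHANNEL_READY) != 0 &&
            (self.channel_state &
                (AWAITING_REMOTE_REVOKE | PEER_DISCONNECTED | MONITOR_UPDATE_IN_PROGRESS)) == 0
        {
            self.free_holding_cell_htlcs()
        } else { None }
    }

    fn free_holding_cell_htlcs(&mut self) -> Option<MonitorUpdate> {
        if self.holding_cell_htlc_updates.is_empty() { return None; }
        // Drain the held updates into one commitment-level monitor update, returned by value.
        self.holding_cell_htlc_updates.clear();
        self.next_update_id += 1;
        Some(MonitorUpdate { update_id: self.next_update_id })
    }
}

fn main() {
    let mut chan = Chan {
        channel_state: CHANNEL_READY,
        holding_cell_htlc_updates: vec!["fulfill"],
        next_update_id: 0,
    };
    assert_eq!(chan.maybe_free_holding_cell_htlcs().map(|u| u.update_id), Some(1));
    // The cell is now empty, so a second call is a no-op.
    assert!(chan.maybe_free_holding_cell_htlcs().is_none());
}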
/// Frees any pending commitment updates in the holding cell, generating the relevant messages
/// for our counterparty.
- fn free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
+ fn free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
/// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
/// generating an appropriate error *after* the channel state has been updated based on the
/// revoke_and_ack message.
- pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<&ChannelMonitorUpdate>), ChannelError>
+ pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
where L::Target: Logger,
{
if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
pub fn shutdown<SP: Deref>(
&mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
- ) -> Result<(Option<msgs::Shutdown>, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
+ ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
where SP::Target: SignerProvider
{
if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
}],
};
self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
- if self.push_blockable_mon_update(monitor_update) {
- self.context.pending_monitor_updates.last().map(|upd| &upd.update)
- } else { None }
+ self.push_ret_blockable_mon_update(monitor_update)
} else { None };
let shutdown = if send_shutdown {
Some(msgs::Shutdown {
/// Returns the next blocked monitor update, if one exists, and a bool which indicates a
/// further blocked monitor update exists after the next.
- pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(&ChannelMonitorUpdate, bool)> {
+ pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
for i in 0..self.context.pending_monitor_updates.len() {
if self.context.pending_monitor_updates[i].blocked {
self.context.pending_monitor_updates[i].blocked = false;
- return Some((&self.context.pending_monitor_updates[i].update,
+ return Some((self.context.pending_monitor_updates[i].update.clone(),
self.context.pending_monitor_updates.len() > i + 1));
}
}
/// it should be immediately given to the user for persisting or `None` if it should be held as
/// blocked.
fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
- -> Option<&ChannelMonitorUpdate> {
+ -> Option<ChannelMonitorUpdate> {
let release_monitor = self.push_blockable_mon_update(update);
- if release_monitor { self.context.pending_monitor_updates.last().map(|upd| &upd.update) } else { None }
+ if release_monitor { self.context.pending_monitor_updates.last().map(|upd| upd.update.clone()) } else { None }
}
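// A minimal sketch of the blockable pending-update queue that push_ret_blockable_mon_update
// and unblock_next_blocked_monitor_update operate on in this diff, using invented stand-in
// types: releasable updates are handed back by value (cloned), blocked ones stay queued until
// explicitly unblocked. The release rule used here (release only when nothing queued ahead is
// blocked) is an assumption for the sketch, not the real LDK policy.
#[derive(Clone)]
struct MonitorUpdate { update_id: u64 }                 // stand-in for ChannelMonitorUpdate

struct PendingUpdate { update: MonitorUpdate, blocked: bool }

struct Chan { pending_monitor_updates: Vec<PendingUpdate> }

impl Chan {
    // Queue `update`; hold it as blocked if any earlier queued update is still blocked.
    // Returns whether the new update may be released to the caller immediately.
    fn push_blockable_mon_update(&mut self, update: MonitorUpdate) -> bool {
        let release = self.pending_monitor_updates.iter().all(|u| !u.blocked);
        self.pending_monitor_updates.push(PendingUpdate { update, blocked: !release });
        release
    }

    // Same as above, but hand back an owned copy of the update when it can be released now.
    fn push_ret_blockable_mon_update(&mut self, update: MonitorUpdate) -> Option<MonitorUpdate> {
        let release_monitor = self.push_blockable_mon_update(update);
        if release_monitor {
            self.pending_monitor_updates.last().map(|upd| upd.update.clone())
        } else { None }
    }

    // Unblock the next blocked update, returning it by value plus whether more blocked
    // updates remain after it.
    fn unblock_next_blocked_monitor_update(&mut self) -> Option<(MonitorUpdate, bool)> {
        for i in 0..self.pending_monitor_updates.len() {
            if self.pending_monitor_updates[i].blocked {
                self.pending_monitor_updates[i].blocked = false;
                return Some((self.pending_monitor_updates[i].update.clone(),
                    self.pending_monitor_updates.len() > i + 1));
            }
        }
        None
    }
}

fn main() {
    let mut chan = Chan { pending_monitor_updates: Vec::new() };
    // Nothing is blocked ahead of it, so the first update is released immediately, by value.
    let released = chan.push_ret_blockable_mon_update(MonitorUpdate { update_id: 1 });
    assert_eq!(released.map(|u| u.update_id), Some(1));
    // Nothing in the queue is blocked, so there is nothing to unblock.
    assert!(chan.unblock_next_blocked_monitor_update().is_none());
}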
pub fn no_monitor_updates_pending(&self) -> bool {
pub fn send_htlc_and_commit<L: Deref>(
&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>, logger: &L
- ) -> Result<Option<&ChannelMonitorUpdate>, ChannelError> where L::Target: Logger {
+ ) -> Result<Option<ChannelMonitorUpdate>, ChannelError> where L::Target: Logger {
let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
onion_routing_packet, false, skimmed_fee_msat, logger);
if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
/// [`ChannelMonitorUpdate`] will be returned).
pub fn get_shutdown<SP: Deref>(&mut self, signer_provider: &SP, their_features: &InitFeatures,
target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
- -> Result<(msgs::Shutdown, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
+ -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
where SP::Target: SignerProvider {
for htlc in self.context.pending_outbound_htlcs.iter() {
if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
}],
};
self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
- if self.push_blockable_mon_update(monitor_update) {
- self.context.pending_monitor_updates.last().map(|upd| &upd.update)
- } else { None }
+ self.push_ret_blockable_mon_update(monitor_update)
} else { None };
let shutdown = msgs::Shutdown {
channel_id: self.context.channel_id,
// Update the monitor with the shutdown script if necessary.
if let Some(monitor_update) = monitor_update_opt.take() {
let update_id = monitor_update.update_id;
- let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update);
+ let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), &monitor_update);
break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan_entry);
}
match break_chan_entry!(self, send_res, chan) {
Some(monitor_update) => {
let update_id = monitor_update.update_id;
- let update_res = self.chain_monitor.update_channel(funding_txo, monitor_update);
+ let update_res = self.chain_monitor.update_channel(funding_txo, &monitor_update);
if let Err(e) = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan) {
break Err(e);
}
peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
}
let update_id = monitor_update.update_id;
- let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, monitor_update);
+ let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &monitor_update);
let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
peer_state, per_peer_state, chan);
if let Err(e) = res {
// Update the monitor with the shutdown script if necessary.
if let Some(monitor_update) = monitor_update_opt {
let update_id = monitor_update.update_id;
- let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update);
+ let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), &monitor_update);
break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan_entry);
}
break Ok(());
let funding_txo = chan.get().context.get_funding_txo();
let monitor_update_opt = try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &self.logger), chan);
if let Some(monitor_update) = monitor_update_opt {
- let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
+ let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), &monitor_update);
let update_id = monitor_update.update_id;
handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
peer_state, per_peer_state, chan)
let funding_txo = chan.get().context.get_funding_txo();
let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
let res = if let Some(monitor_update) = monitor_update_opt {
- let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
+ let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), &monitor_update);
let update_id = monitor_update.update_id;
handle_new_monitor_update!(self, update_res, update_id,
peer_state_lock, peer_state, per_peer_state, chan)
has_monitor_update = true;
let update_res = self.chain_monitor.update_channel(
- funding_txo.expect("channel is live"), monitor_update);
+ funding_txo.expect("channel is live"), &monitor_update);
let update_id = monitor_update.update_id;
let channel_id: [u8; 32] = *channel_id;
let res = handle_new_monitor_update!(self, update_res, update_id,
if let Some((monitor_update, further_update_exists)) = chan.get_mut().unblock_next_blocked_monitor_update() {
log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor",
log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
- let update_res = self.chain_monitor.update_channel(channel_funding_outpoint, monitor_update);
+ let update_res = self.chain_monitor.update_channel(channel_funding_outpoint, &monitor_update);
let update_id = monitor_update.update_id;
if let Err(e) = handle_new_monitor_update!(self, update_res, update_id,
peer_state_lck, peer_state, per_peer_state, chan)
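// A minimal sketch of the caller-side pattern used in the ChannelManager hunks above, with
// invented stand-in types rather than the real chain::Watch / ChannelMonitorUpdateStatus API:
// the channel now returns an owned update, the caller lends it to update_channel by reference,
// and can still read fields such as update_id afterwards because it keeps ownership.
#[derive(Clone)]
struct MonitorUpdate { update_id: u64 }                 // stand-in for ChannelMonitorUpdate

#[derive(Clone, Copy)]
struct OutPoint;                                        // stand-in for the funding outpoint

enum UpdateStatus { Completed, InProgress }             // stand-in for ChannelMonitorUpdateStatus

trait Watch {
    fn update_channel(&self, funding_txo: OutPoint, update: &MonitorUpdate) -> UpdateStatus;
}

// Toy watcher: pretends even-numbered updates persist asynchronously, odd ones immediately.
struct ToyMonitor;
impl Watch for ToyMonitor {
    fn update_channel(&self, _funding_txo: OutPoint, update: &MonitorUpdate) -> UpdateStatus {
        if update.update_id % 2 == 0 { UpdateStatus::InProgress } else { UpdateStatus::Completed }
    }
}

fn apply_update<W: Watch>(chain_monitor: &W, funding_txo: OutPoint,
    monitor_update_opt: Option<MonitorUpdate>)
{
    if let Some(monitor_update) = monitor_update_opt {
        // Borrow the owned update for the chain-monitor call...
        let update_res = chain_monitor.update_channel(funding_txo, &monitor_update);
        // ...and keep using it afterwards, e.g. to key completion handling on its id.
        let update_id = monitor_update.update_id;
        match update_res {
            UpdateStatus::Completed => println!("update {} persisted", update_id),
            UpdateStatus::InProgress => println!("update {} pending async persistence", update_id),
        }
    }
}

fn main() {
    apply_update(&ToyMonitor, OutPoint, Some(MonitorUpdate { update_id: 7 }));
    apply_update(&ToyMonitor, OutPoint, None);
}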