+ /// When something which was blocking a channel from updating its [`ChannelMonitor`] (e.g. an
+ /// [`Event`] being handled) completes, this should be called to restore the channel to normal
+ /// operation. It will double-check that nothing *else* is also blocking the same channel from
+ /// making progress and then let any blocked [`ChannelMonitorUpdate`]s fly.
+ fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint, mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
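+     // Errors from `handle_new_monitor_update!` are collected here and only surfaced via
+     // `handle_error!` after the loop below has dropped the per-peer locks it takes.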
+     let mut errors = Vec::new();
+     loop {
+         let per_peer_state = self.per_peer_state.read().unwrap();
+         if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
+             let mut peer_state_lck = peer_state_mtx.lock().unwrap();
+             let peer_state = &mut *peer_state_lck;
+
+             if let Some(blocker) = completed_blocker.take() {
+                 // Only do this on the first iteration of the loop.
+                 if let Some(blockers) = peer_state.actions_blocking_raa_monitor_updates
+                     .get_mut(&channel_funding_outpoint.to_channel_id())
+                 {
+                     blockers.retain(|iter| iter != &blocker);
+                 }
+             }
+
+             if self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
+                 channel_funding_outpoint, counterparty_node_id) {
+                 // Check that, while holding the peer lock, we don't have anything else
+                 // blocking monitor updates for this channel. If we do, release the monitor
+                 // update(s) when those blockers complete.
+                 log_trace!(self.logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first",
+                     log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
+                 break;
+             }
+
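+             // Nothing else is blocking this channel's monitor updates, so release the next
+             // queued `ChannelMonitorUpdate`, if there is one.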
+             if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) {
+                 debug_assert_eq!(chan.get().get_funding_txo().unwrap(), channel_funding_outpoint);
+                 if let Some((monitor_update, further_update_exists)) = chan.get_mut().unblock_next_blocked_monitor_update() {
+                     log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor",
+                         log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
+                     let update_res = self.chain_monitor.update_channel(channel_funding_outpoint, monitor_update);
+                     let update_id = monitor_update.update_id;
+                     if let Err(e) = handle_new_monitor_update!(self, update_res, update_id,
+                         peer_state_lck, peer_state, per_peer_state, chan)
+                     {
+                         errors.push((e, counterparty_node_id));
+                     }
+                     if further_update_exists {
+                         // If there are more `ChannelMonitorUpdate`s to process, restart at the
+                         // top of the loop.
+                         continue;
+                     }
+                 } else {
+                     log_trace!(self.logger, "Unlocked monitor updating for channel {} without monitors to update",
+                         log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
+                 }
+             }
+         } else {
+             log_debug!(self.logger,
+                 "Got a release post-RAA monitor update for peer {} but the channel is gone",
+                 log_pubkey!(counterparty_node_id));
+         }
+         break;
+     }
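+     // The per-peer locks taken in the loop above have been dropped, so it is now safe to
+     // surface any errors we collected.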
+     for (err, counterparty_node_id) in errors {
+         let res = Err::<(), _>(err);
+         let _ = handle_error!(self, res, counterparty_node_id);
+     }
+ }
+
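+ /// Handles the post-event-handling actions attached to [`Event`]s. Currently the only such
+ /// action is [`EventCompletionAction::ReleaseRAAChannelMonitorUpdate`], which releases any
+ /// RAA-blocked [`ChannelMonitorUpdate`]s via [`Self::handle_monitor_update_release`].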
+ fn handle_post_event_actions(&self, actions: Vec<EventCompletionAction>) {
+     for action in actions {
+         match action {
+             EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
+                 channel_funding_outpoint, counterparty_node_id
+             } => {
+                 self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, None);
+             }
+         }
+     }
+ }
+