X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fchain%2Fchainmonitor.rs;h=de50f5777dde9792ae2ad93a0fcc3f3947b6a9b0;hb=83f0dbc0021335dce183450d7dce7e9f284ff0b6;hp=562c76fa3e2c996f136c54d8fb4adb36baeba802;hpb=bd1206777735696c7aa5ece2f2f2bda6c5a87661;p=rust-lightning

diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs
index 562c76fa..de50f577 100644
--- a/lightning/src/chain/chainmonitor.rs
+++ b/lightning/src/chain/chainmonitor.rs
@@ -364,8 +364,7 @@ where C::Target: chain::Filter,
 	/// claims which are awaiting confirmation.
 	///
 	/// Includes the balances from each [`ChannelMonitor`] *except* those included in
-	/// `ignored_channels`, allowing you to filter out balances from channels which are still open
-	/// (and whose balance should likely be pulled from the [`ChannelDetails`]).
+	/// `ignored_channels`.
 	///
 	/// See [`ChannelMonitor::get_claimable_balances`] for more details on the exact criteria for
 	/// inclusion in the return value.
@@ -520,12 +519,13 @@ where C::Target: chain::Filter,
 	pub async fn process_pending_events_async<Future: core::future::Future, H: Fn(Event) -> Future>(
 		&self, handler: H
 	) {
-		let mut pending_events = Vec::new();
-		for monitor_state in self.monitors.read().unwrap().values() {
-			pending_events.append(&mut monitor_state.monitor.get_and_clear_pending_events());
-		}
-		for event in pending_events {
-			handler(event).await;
+		// Sadly we can't hold the monitors read lock through an async call. Thus we have to do a
+		// crazy dance to process a monitor's events then only remove them once we've done so.
+		let mons_to_process = self.monitors.read().unwrap().keys().cloned().collect::<Vec<_>>();
+		for funding_txo in mons_to_process {
+			let mut ev;
+			super::channelmonitor::process_events_body!(
+				self.monitors.read().unwrap().get(&funding_txo).map(|m| &m.monitor), ev, handler(ev).await);
 		}
 	}
 
@@ -796,12 +796,8 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref> events::EventsProvider for ChainMonitor<ChannelSigner, C, T, F, L, P>
 	fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
-		let mut pending_events = Vec::new();
 		for monitor_state in self.monitors.read().unwrap().values() {
-			pending_events.append(&mut monitor_state.monitor.get_and_clear_pending_events());
-		}
-		for event in pending_events {
-			handler.handle_event(event);
+			monitor_state.monitor.process_pending_events(&handler);
 		}
 	}
 }
@@ -969,7 +965,8 @@ mod tests {
 			assert!(err.contains("ChannelMonitor storage failure")));
 		check_added_monitors!(nodes[0], 2); // After the failure we generate a close-channel monitor update
 		check_closed_broadcast!(nodes[0], true);
-		check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
+		check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() },
+			[nodes[1].node.get_our_node_id()], 100000);
 
 		// However, as the ChainMonitor is still waiting for the original persistence to complete,
 		// it won't yet release the MonitorEvents.
@@ -1016,7 +1013,8 @@ mod tests {
 		// ... however once we get events once, the channel will close, creating a channel-closed
 		// ChannelMonitorUpdate.
 		check_closed_broadcast!(nodes[0], true);
-		check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() });
+		check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() },
+			[nodes[1].node.get_our_node_id()], 100000);
		check_added_monitors!(nodes[0], 1);
 	}
 }
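
The `ignored_channels` doc in the first hunk describes a filter: balances are gathered from every `ChannelMonitor` except those whose channel appears in `ignored_channels`, which callers typically populate with the `ChannelDetails` of still-open channels. A minimal standalone sketch of that filtering semantic follows; `ChannelId`, `Balance`, and the `ChainMonitor` struct here are simplified stand-ins for illustration, not LDK's actual types.

use std::collections::HashSet;

#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct ChannelId([u8; 32]);

struct ChannelDetails { channel_id: ChannelId }

#[derive(Clone, Debug)]
struct Balance { claimable_sats: u64 }

struct ChainMonitor { balances: Vec<(ChannelId, Balance)> }

impl ChainMonitor {
	// Returns balances from every monitored channel *except* those listed in
	// `ignored_channels`, mirroring the doc text in the hunk above.
	fn get_claimable_balances(&self, ignored_channels: &[&ChannelDetails]) -> Vec<Balance> {
		let ignored: HashSet<&ChannelId> =
			ignored_channels.iter().map(|c| &c.channel_id).collect();
		self.balances.iter()
			.filter(|(id, _)| !ignored.contains(id))
			.map(|(_, bal)| bal.clone())
			.collect()
	}
}

fn main() {
	let still_open = ChannelDetails { channel_id: ChannelId([0; 32]) };
	let mon = ChainMonitor { balances: vec![
		(ChannelId([0; 32]), Balance { claimable_sats: 1_000 }),
		(ChannelId([1; 32]), Balance { claimable_sats: 2_500 }),
	]};
	// Prints only the second balance; the open channel's is filtered out.
	println!("{:?}", mon.get_claimable_balances(&[&still_open]));
}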
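
The rewritten `process_pending_events_async` exists because a `std::sync::RwLock` read guard must not live across an `.await`: the guard is not `Send`, and holding it while the handler runs risks deadlock if the handler re-enters the `ChainMonitor`. Below is a standalone sketch, under simplified assumed types, of the same snapshot-the-keys-then-relock dance; LDK's real `process_events_body!` macro is considerably more involved, but the locking shape matches the `+` lines above.

use std::collections::HashMap;
use std::sync::RwLock;

// Simplified stand-ins for LDK's funding outpoint and channel monitor.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct FundingTxo(u64);

struct Monitor { pending_events: RwLock<Vec<String>> }

struct MonitorSet { monitors: RwLock<HashMap<FundingTxo, Monitor>> }

impl MonitorSet {
	async fn process_pending_events_async<Fut: core::future::Future, H: Fn(String) -> Fut>(
		&self, handler: H,
	) {
		// Snapshot the keys so no `monitors` guard is alive when we `.await`.
		let keys = self.monitors.read().unwrap().keys().cloned().collect::<Vec<_>>();
		for key in keys {
			// Re-take the read lock just long enough to copy this monitor's
			// events out; the guard is dropped again before any await point.
			let events = match self.monitors.read().unwrap().get(&key) {
				Some(mon) => mon.pending_events.read().unwrap().clone(),
				None => continue, // monitor was removed after the snapshot
			};
			let handled = events.len();
			for ev in events {
				handler(ev).await;
			}
			// Clear events only once the handler has run for them (assuming a
			// single consumer), matching the "only remove them once we've done
			// so" intent of the committed comment.
			if let Some(mon) = self.monitors.read().unwrap().get(&key) {
				mon.pending_events.write().unwrap().drain(..handled);
			}
		}
	}
}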
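
On the synchronous side, the final code hunk replaces the drain-everything-into-one-`Vec` approach with per-monitor delegation, so the sync and async entry points both funnel through the monitor's own event processing. A hedged sketch of that shape, with hypothetical types (`Event`, the plain `EventHandler` bound, and both structs are illustrative only, simpler than LDK's `H: Deref` form):

use std::sync::{Mutex, RwLock};

struct Event(u32);

trait EventHandler { fn handle_event(&self, event: Event); }

struct Monitor { pending: Mutex<Vec<Event>> }

impl Monitor {
	// Each monitor dispatches its own events, keeping the event-handling logic
	// in one place for both the sync and async paths.
	fn process_pending_events<H: EventHandler>(&self, handler: &H) {
		let events = std::mem::take(&mut *self.pending.lock().unwrap());
		for ev in events {
			handler.handle_event(ev);
		}
	}
}

struct ChainMonitor { monitors: RwLock<Vec<Monitor>> }

impl ChainMonitor {
	// Mirrors the `+` line in the last code hunk: straight delegation instead
	// of collecting every monitor's events into a temporary `Vec` first.
	fn process_pending_events<H: EventHandler>(&self, handler: &H) {
		for monitor in self.monitors.read().unwrap().iter() {
			monitor.process_pending_events(handler);
		}
	}
}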