/// let mut channel_monitors = read_channel_monitors();
/// let args = ChannelManagerReadArgs::new(
/// entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster,
-/// router, message_router, logger, default_config, channel_monitors.iter_mut().collect(),
+/// router, message_router, logger, default_config, channel_monitors.iter().collect(),
/// );
/// let (block_hash, channel_manager) =
/// <(BlockHash, ChannelManager<_, _, _, _, _, _, _, _, _>)>::read(&mut reader, args)?;
/// 3) If you are not fetching full blocks, register all relevant [`ChannelMonitor`] outpoints the
/// same way you would handle a [`chain::Filter`] call using
/// [`ChannelMonitor::get_outputs_to_watch`] and [`ChannelMonitor::get_funding_txo`].
-/// 4) Reconnect blocks on your [`ChannelMonitor`]s.
-/// 5) Disconnect/connect blocks on the [`ChannelManager`].
-/// 6) Re-persist the [`ChannelMonitor`]s to ensure the latest state is on disk.
+/// 4) Disconnect/connect blocks on your [`ChannelMonitor`]s to get them in sync with the chain.
+/// 5) Disconnect/connect blocks on the [`ChannelManager`] to get it in sync with the chain.
+/// 6) Optionally re-persist the [`ChannelMonitor`]s to ensure the latest state is on disk.
+/// This is important if you have replayed a nontrivial number of blocks in step (4), as it
+/// lets you avoid replaying the same blocks if you shut down shortly after startup. It is
+/// otherwise not required.
/// Note that if you're using a [`ChainMonitor`] for your [`chain::Watch`] implementation, you
/// will likely accomplish this as a side-effect of calling [`chain::Watch::watch_channel`] in
/// the next step.
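A minimal sketch of the registration in step (3), assuming `filter` is any [`chain::Filter`] implementation and `channel_monitors` is the `Vec` of monitors deserialized in step (1); exact method signatures may differ slightly between LDK releases:

```rust
use lightning::chain::Filter;

// Step (3) sketch: when filtering blocks rather than fetching full ones, re-register
// each monitor's funding transaction with the `chain::Filter` so its confirmations
// are not missed after restart.
for monitor in channel_monitors.iter() {
    let (funding_outpoint, funding_script) = monitor.get_funding_txo();
    filter.register_tx(&funding_outpoint.txid, &funding_script);
    // Each output returned by `ChannelMonitor::get_outputs_to_watch` would be
    // registered the same way via `Filter::register_output`.
}
```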
/// this struct.
///
/// This is not exported to bindings users because we have no HashMap bindings
- pub channel_monitors: HashMap<OutPoint, &'a mut ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
+ pub channel_monitors: HashMap<OutPoint, &'a ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
}
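Because `channel_monitors` now holds shared borrows, a caller keeps ownership of the monitors through deserialization and can hand them off afterwards. A caller-side sketch reusing the hypothetical `read_channel_monitors()` helper from the doc example above; the other variables are assumed to be in scope:

```rust
// Sketch: the read args only borrow the monitors immutably now, so the same owned
// Vec can later be moved into the `chain::Watch` implementation once the read completes.
let mut channel_monitors = read_channel_monitors();
let read_args = ChannelManagerReadArgs::new(
    entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor,
    tx_broadcaster, router, message_router, logger, default_config,
    channel_monitors.iter().collect(), // shared borrows; previously required `iter_mut()`
);
let (_block_hash, channel_manager) =
    <(BlockHash, ChannelManager<_, _, _, _, _, _, _, _, _>)>::read(&mut reader, read_args)?;
// The borrows end with `read`, leaving `channel_monitors` free to be drained into
// `chain::Watch::watch_channel`.
```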
impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
entropy_source: ES, node_signer: NS, signer_provider: SP, fee_estimator: F,
chain_monitor: M, tx_broadcaster: T, router: R, message_router: MR, logger: L,
default_config: UserConfig,
- mut channel_monitors: Vec<&'a mut ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
+ mut channel_monitors: Vec<&'a ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
) -> Self {
Self {
entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor,
// them to ensure we can write and reload our ChannelManager.
{
let mut channel_monitors = new_hash_map();
- for monitor in deserialized_monitors.iter_mut() {
+ for monitor in deserialized_monitors.iter() {
channel_monitors.insert(monitor.get_funding_txo().0, monitor);
}
let mut node_read = &chanman_encoded[..];
let (_, node_deserialized) = {
let mut channel_monitors = new_hash_map();
- for monitor in monitors_read.iter_mut() {
+ for monitor in monitors_read.iter() {
assert!(channel_monitors.insert(monitor.get_funding_txo().0, monitor).is_none());
}
<(BlockHash, TestChannelManager<'b, 'c>)>::read(&mut node_read, ChannelManagerReadArgs {
chain_monitor: nodes[0].chain_monitor,
tx_broadcaster: nodes[0].tx_broadcaster,
logger: &logger,
- channel_monitors: node_0_stale_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
+ channel_monitors: node_0_stale_monitors.iter().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
}) { } else {
panic!("If the monitor(s) are stale, this indicates a bug and we should get an Err return");
};
chain_monitor: nodes[0].chain_monitor,
tx_broadcaster: nodes[0].tx_broadcaster,
logger: &logger,
- channel_monitors: node_0_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
+ channel_monitors: node_0_monitors.iter().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
}).unwrap();
nodes_0_deserialized = nodes_0_deserialized_tmp;
assert!(nodes_0_read.is_empty());
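After a successful read, the owned monitors are typically moved into the `chain::Watch` implementation. A hedged sketch of that follow-up step, assuming `watch_channel` returns `Result<ChannelMonitorUpdateStatus, ()>` and that monitor persistence completes synchronously (as with a test persister):

```rust
use lightning::chain::{ChannelMonitorUpdateStatus, Watch};

// Sketch: the shared borrows taken by ChannelManagerReadArgs end once `read` returns,
// so the owned monitors can now be handed to the chain::Watch implementation.
for monitor in node_0_monitors.drain(..) {
    let funding_txo = monitor.get_funding_txo().0;
    assert_eq!(
        nodes[0].chain_monitor.watch_channel(funding_txo, monitor),
        Ok(ChannelMonitorUpdateStatus::Completed)
    );
}
```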