X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=e553e8e534055ae9cf3a8d64e0acc83aa04e30a6;hb=0e83e91d7ae6e402ca24164e920cf12b486cd17c;hp=29c0141539b1a1a76d919938b43abb2b2ecb7616;hpb=341163eeb599a8c5c79e06f0e6d6e8fb2a1e756d;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 29c01415..e553e8e5 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -2042,12 +2042,14 @@ macro_rules! handle_monitor_update_completion {
 }
 
 macro_rules! handle_new_monitor_update {
-	($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, _internal, $remove: expr, $completed: expr) => { {
-		// update_maps_on_chan_removal needs to be able to take id_to_peer, so make sure we can in
-		// any case so that it won't deadlock.
-		debug_assert_ne!($self.id_to_peer.held_by_thread(), LockHeldState::HeldByThread);
+	($self: ident, $update_res: expr, $chan: expr, _internal, $completed: expr) => { {
 		debug_assert!($self.background_events_processed_since_startup.load(Ordering::Acquire));
 		match $update_res {
+			ChannelMonitorUpdateStatus::UnrecoverableError => {
+				let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
+				log_error!($self.logger, "{}", err_str);
+				panic!("{}", err_str);
+			},
 			ChannelMonitorUpdateStatus::InProgress => {
 				log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
 					&$chan.context.channel_id());
@@ -2059,23 +2061,11 @@ macro_rules! handle_new_monitor_update {
 			},
 		}
 	} };
-	($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING_INITIAL_MONITOR, $remove: expr) => {
-		handle_new_monitor_update!($self, $update_res, $peer_state_lock, $peer_state,
-			$per_peer_state_lock, $chan, _internal, $remove,
+	($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, INITIAL_MONITOR) => {
+		handle_new_monitor_update!($self, $update_res, $chan, _internal,
 			handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan))
 	};
-	($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr, INITIAL_MONITOR) => {
-		if let ChannelPhase::Funded(chan) = $chan_entry.get_mut() {
-			handle_new_monitor_update!($self, $update_res, $peer_state_lock, $peer_state,
-				$per_peer_state_lock, chan, MANUALLY_REMOVING_INITIAL_MONITOR, { $chan_entry.remove() })
-		} else {
-			// We're not supposed to handle monitor updates for unfunded channels (they have no monitors to
-			// update). Throwing away a monitor update could be dangerous, so we assert even in
-			// release builds.
-			panic!("Initial Monitors should not exist for non-funded channels");
-		}
-	};
-	($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
+	($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
 		let in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo)
 			.or_insert_with(Vec::new);
 		// During startup, we push monitor updates as background events through to here in
@@ -2087,8 +2077,7 @@ macro_rules! handle_new_monitor_update {
 			in_flight_updates.len() - 1
 		});
 		let update_res = $self.chain_monitor.update_channel($funding_txo, &in_flight_updates[idx]);
-		handle_new_monitor_update!($self, update_res, $peer_state_lock, $peer_state,
-			$per_peer_state_lock, $chan, _internal, $remove,
+		handle_new_monitor_update!($self, update_res, $chan, _internal,
 			{
 				let _ = in_flight_updates.remove(idx);
 				if in_flight_updates.is_empty() && $chan.blocked_monitor_updates_pending() == 0 {
@@ -2096,17 +2085,6 @@ macro_rules! handle_new_monitor_update {
 			}
 		})
 	} };
-	($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr) => {
-		if let ChannelPhase::Funded(chan) = $chan_entry.get_mut() {
-			handle_new_monitor_update!($self, $funding_txo, $update, $peer_state_lock, $peer_state,
-				$per_peer_state_lock, chan, MANUALLY_REMOVING, { $chan_entry.remove() })
-		} else {
-			// We're not supposed to handle monitor updates for unfunded channels (they have no monitors to
-			// update). Throwing away a monitor update could be dangerous, so we assert even in
-			// release builds.
-			panic!("Monitor updates should not exist for non-funded channels");
-		}
-	}
 }
 
 macro_rules! process_events_body {
@@ -2551,7 +2529,7 @@ where
 				// Update the monitor with the shutdown script if necessary.
 				if let Some(monitor_update) = monitor_update_opt.take() {
 					handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
-						peer_state_lock, peer_state, per_peer_state, chan_phase_entry);
+						peer_state_lock, peer_state, per_peer_state, chan);
 					break;
 				}
@@ -3325,7 +3303,7 @@ where
 					}, onion_packet, None, &self.fee_estimator, &self.logger);
 				match break_chan_phase_entry!(self, send_res, chan_phase_entry) {
 					Some(monitor_update) => {
-						match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan_phase_entry) {
+						match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
 							false => {
 								// Note that MonitorUpdateInProgress here indicates (per function
 								// docs) that we will resend the commitment update once monitor
@@ -4524,9 +4502,13 @@ where
 			let peer_state = &mut *peer_state_lock;
 			match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
 				hash_map::Entry::Occupied(mut chan_phase) => {
-					updated_chan = true;
-					handle_new_monitor_update!(self, funding_txo, update.clone(),
-						peer_state_lock, peer_state, per_peer_state, chan_phase);
+					if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
+						updated_chan = true;
+						handle_new_monitor_update!(self, funding_txo, update.clone(),
+							peer_state_lock, peer_state, per_peer_state, chan);
+					} else {
+						debug_assert!(false, "We shouldn't have an update for a non-funded channel");
+					}
 				},
 				hash_map::Entry::Vacant(_) => {},
 			}
@@ -5259,7 +5241,7 @@ where
 				}
 				if !during_init {
 					handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
-						peer_state, per_peer_state, chan_phase_entry);
+						peer_state, per_peer_state, chan);
 				} else {
 					// If we're running during init we cannot update a monitor directly -
 					// they probably haven't actually been loaded yet. Instead, push the
@@ -5903,7 +5885,6 @@ where
 		// hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
 		// accepted payment from yet. We do, however, need to wait to send our channel_ready
 		// until we have persisted our monitor.
-		let new_channel_id = funding_msg.channel_id;
 		peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
 			node_id: counterparty_node_id.clone(),
 			msg: funding_msg,
@@ -5911,8 +5892,7 @@ where
 
 		if let ChannelPhase::Funded(chan) = e.insert(ChannelPhase::Funded(chan)) {
 			handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state,
-				per_peer_state, chan, MANUALLY_REMOVING_INITIAL_MONITOR,
-				{ peer_state.channel_by_id.remove(&new_channel_id) });
+				per_peer_state, chan, INITIAL_MONITOR);
 		} else {
 			unreachable!("This must be a funded channel as we just inserted it.");
 		}
@@ -5947,7 +5927,7 @@ where
 			let monitor = try_chan_phase_entry!(self,
 				chan.funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan_phase_entry);
 			if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
-				handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan_phase_entry, INITIAL_MONITOR);
+				handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
 				Ok(())
 			} else {
 				try_chan_phase_entry!(self, Err(ChannelError::Close("Channel funding outpoint was a duplicate".to_owned())), chan_phase_entry)
@@ -6054,7 +6034,7 @@ where
 					// Update the monitor with the shutdown script if necessary.
 					if let Some(monitor_update) = monitor_update_opt {
 						handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
-							peer_state_lock, peer_state, per_peer_state, chan_phase_entry);
+							peer_state_lock, peer_state, per_peer_state, chan);
 					}
 				},
 				ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
@@ -6306,7 +6286,7 @@ where
 			let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &self.logger), chan_phase_entry);
 			if let Some(monitor_update) = monitor_update_opt {
 				handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
-					peer_state, per_peer_state, chan_phase_entry);
+					peer_state, per_peer_state, chan);
 			}
 			Ok(())
 		} else {
@@ -6480,7 +6460,7 @@ where
 					let funding_txo = funding_txo_opt
 						.expect("Funding outpoint must have been set for RAA handling to succeed");
 					handle_new_monitor_update!(self, funding_txo, monitor_update,
-						peer_state_lock, peer_state, per_peer_state, chan_phase_entry);
+						peer_state_lock, peer_state, per_peer_state, chan);
 				}
 				htlcs_to_fail
 			} else {
@@ -6777,10 +6757,8 @@ where
 						if let Some(monitor_update) = monitor_opt {
 							has_monitor_update = true;
-							let channel_id: ChannelId = *channel_id;
 							handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
-								peer_state_lock, peer_state, per_peer_state, chan, MANUALLY_REMOVING,
-								peer_state.channel_by_id.remove(&channel_id));
+								peer_state_lock, peer_state, per_peer_state, chan);
 							continue 'peer_loop;
 						}
 					}
@@ -7089,7 +7067,6 @@ where
 	/// operation. It will double-check that nothing *else* is also blocking the same channel from
 	/// making progress and then let any blocked [`ChannelMonitorUpdate`]s fly.
 	fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint, mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
-		let mut errors = Vec::new();
 		loop {
 			let per_peer_state = self.per_peer_state.read().unwrap();
 			if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
@@ -7122,7 +7099,7 @@ where
 						log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor",
 							channel_funding_outpoint.to_channel_id());
 						handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
-							peer_state_lck, peer_state, per_peer_state, chan_phase_entry);
+							peer_state_lck, peer_state, per_peer_state, chan);
 						if further_update_exists {
 							// If there are more `ChannelMonitorUpdate`s to process, restart at the
 							// top of the loop.
@@ -7141,10 +7118,6 @@ where
 			}
 			break;
 		}
-		for (err, counterparty_node_id) in errors {
-			let res = Err::<(), _>(err);
-			let _ = handle_error!(self, res, counterparty_node_id);
-		}
 	}
 
 	fn handle_post_event_actions(&self, actions: Vec<EventCompletionAction>) {
@@ -10132,7 +10105,7 @@ mod tests {
 			TEST_FINAL_CLTV, false), 100_000);
 		let route = find_route(
 			&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
-			None, nodes[0].logger, &scorer, &(), &random_seed_bytes
+			None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
 		).unwrap();
 		nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
 			RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0)).unwrap();
@@ -10166,7 +10139,7 @@ mod tests {
 		let payment_preimage = PaymentPreimage([42; 32]);
 		let route = find_route(
 			&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
-			None, nodes[0].logger, &scorer, &(), &random_seed_bytes
+			None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
 		).unwrap();
 		let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
 			RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0)).unwrap();
@@ -10223,7 +10196,7 @@ mod tests {
 		);
 		let route = find_route(
 			&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
-			None, nodes[0].logger, &scorer, &(), &random_seed_bytes
+			None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
 		).unwrap();
 		let payment_id_2 = PaymentId([45; 32]);
 		nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
@@ -10274,7 +10247,7 @@ mod tests {
 		let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
 		let route = find_route(
 			&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
-			nodes[0].logger, &scorer, &(), &random_seed_bytes
+			nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
 		).unwrap();
 
 		let test_preimage = PaymentPreimage([42; 32]);
@@ -10319,7 +10292,7 @@ mod tests {
 		let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
 		let route = find_route(
 			&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
-			nodes[0].logger, &scorer, &(), &random_seed_bytes
+			nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
 		).unwrap();
 
 		let test_preimage = PaymentPreimage([42; 32]);