diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 23235bc6..f502a333 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -577,13 +577,13 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = C
 // | |
 // | |__`pending_intercepted_htlcs`
 // |
-// |__`pending_inbound_payments`
+// |__`per_peer_state`
 // | |
-// | |__`claimable_payments`
-// | |
-// | |__`pending_outbound_payments` // This field's struct contains a map of pending outbounds
+// | |__`pending_inbound_payments`
+// | |
+// | |__`claimable_payments`
 // | |
-// |__`per_peer_state`
+// | |__`pending_outbound_payments` // This field's struct contains a map of pending outbounds
 // | |
 // | |__`peer_state`
 // | |
@@ -1153,12 +1153,12 @@ macro_rules! handle_error {
 		match $internal {
 			Ok(msg) => Ok(msg),
 			Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => {
-				#[cfg(debug_assertions)]
+				#[cfg(any(feature = "_test_utils", test))]
 				{
 					// In testing, ensure there are no deadlocks where the lock is already held upon
 					// entering the macro.
-					assert!($self.pending_events.try_lock().is_ok());
-					assert!($self.per_peer_state.try_write().is_ok());
+					debug_assert!($self.pending_events.try_lock().is_ok());
+					debug_assert!($self.per_peer_state.try_write().is_ok());
 				}
 
 				let mut msg_events = Vec::with_capacity(2);
@@ -1193,7 +1193,7 @@ macro_rules! handle_error {
 					let mut peer_state = peer_state_mutex.lock().unwrap();
 					peer_state.pending_msg_events.append(&mut msg_events);
 				}
-				#[cfg(debug_assertions)]
+				#[cfg(any(feature = "_test_utils", test))]
 				{
 					if let None = per_peer_state.get(&$counterparty_node_id) {
 						// This shouldn't occur in tests unless an unknown counterparty_node_id
@@ -1206,10 +1206,10 @@
 						=> {
 							assert_eq!(*data, expected_error_str);
 							if let Some((err_channel_id, _user_channel_id)) = chan_id {
-								assert_eq!(*channel_id, err_channel_id);
+								debug_assert_eq!(*channel_id, err_channel_id);
 							}
 						}
-						_ => panic!("Unexpected event"),
+						_ => debug_assert!(false, "Unexpected event"),
 					}
 				}
 			}
@@ -1709,7 +1709,7 @@ where
 				// Update the monitor with the shutdown script if necessary.
 				if let Some(monitor_update) = monitor_update {
-					let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update);
+					let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), &monitor_update);
 					let (result, is_permanent) =
 						handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
 					if is_permanent {
@@ -1807,7 +1807,7 @@ where
 			// force-closing. The monitor update on the required in-memory copy should broadcast
 			// the latest local state, which is the best we can do anyway. Thus, it is safe to
 			// ignore the result here.
-			let _ = self.chain_monitor.update_channel(funding_txo, monitor_update);
+			let _ = self.chain_monitor.update_channel(funding_txo, &monitor_update);
 		}
 	}
@@ -2102,7 +2102,7 @@ where
 				// short_channel_id is non-0 in any ::Forward.
 				if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing {
 					if let Some((err, mut code, chan_update)) = loop {
-						let id_option = self.short_to_chan_info.read().unwrap().get(&short_channel_id).cloned();
+						let id_option = self.short_to_chan_info.read().unwrap().get(short_channel_id).cloned();
 						let forwarding_chan_info_opt = match id_option {
 							None => { // unknown_next_peer
 								// Note that this is likely a timing oracle for detecting whether an scid is a
@@ -2336,7 +2336,7 @@ where
 						chan)
 				} {
 					Some((update_add, commitment_signed, monitor_update)) => {
-						let update_err = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update);
+						let update_err = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update);
 						let chan_id = chan.get().channel_id();
 						match (update_err,
 							handle_monitor_update_res!(self, update_err, chan,
@@ -2552,7 +2552,7 @@ where
 		let per_peer_state = self.per_peer_state.read().unwrap();
 		let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
 		if let None = peer_state_mutex_opt {
-			return Err(APIError::APIMisuseError { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })
+			return Err(APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })
 		}
 		let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
@@ -3284,7 +3284,7 @@ where
 					BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)) => {
 						// The channel has already been closed, so no use bothering to care about the
 						// monitor update completing.
-						let _ = self.chain_monitor.update_channel(funding_txo, update);
+						let _ = self.chain_monitor.update_channel(funding_txo, &update);
 					},
 				}
 			}
@@ -3565,14 +3565,17 @@ where
 	/// Fails an HTLC backwards to the node which sent it to us.
 	/// Note that we do not assume that channels corresponding to failed HTLCs are still available.
 	fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
-		#[cfg(debug_assertions)]
+		#[cfg(any(feature = "_test_utils", test))]
 		{
 			// Ensure that no peer state channel storage lock is held when calling this
 			// function.
 			// This ensures that future code doesn't introduce a lock_order requirement for
-			// `forward_htlcs` to be locked after the `per_peer_state` locks, which calling this
-			// function with the `per_peer_state` acquired would.
-			assert!(self.per_peer_state.try_write().is_ok());
+			// `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling
+			// this function with any `per_peer_state` peer lock acquired would.
+ let per_peer_state = self.per_peer_state.read().unwrap(); + for (_, peer) in per_peer_state.iter() { + debug_assert!(peer.try_lock().is_ok()); + } } //TODO: There is a timing attack here where if a node fails an HTLC back to us they can @@ -3807,7 +3810,7 @@ where match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) { Ok(msgs_monitor_option) => { if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option { - match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { + match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update) { ChannelMonitorUpdateStatus::Completed => {}, e => { log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Debug }, @@ -3844,7 +3847,7 @@ where } }, Err((e, monitor_update)) => { - match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { + match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update) { ChannelMonitorUpdateStatus::Completed => {}, e => { // TODO: This needs to be handled somehow - if we receive a monitor update @@ -3880,7 +3883,7 @@ where }; // We update the ChannelMonitor on the backward link, after // receiving an `update_fulfill_htlc` from the forward link. - let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, preimage_update); + let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &preimage_update); if update_res != ChannelMonitorUpdateStatus::Completed { // TODO: This needs to be handled somehow - if we receive a monitor update // with a preimage we *must* somehow manage to propagate it to the upstream @@ -4193,7 +4196,7 @@ where let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); let peer_state = &mut *peer_state_lock; let mut channel = match Channel::new_from_req(&self.fee_estimator, &self.entropy_source, &self.signer_provider, - counterparty_node_id.clone(), &peer_state.latest_features, msg, user_channel_id, &self.default_configuration, + counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id, &self.default_configuration, self.best_block.read().unwrap().height(), &self.logger, outbound_scid_alias) { Err(e) => { @@ -4449,7 +4452,7 @@ where // Update the monitor with the shutdown script if necessary. 
if let Some(monitor_update) = monitor_update { - let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update); + let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), &monitor_update); let (result, is_permanent) = handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE); if is_permanent { @@ -4650,13 +4653,13 @@ where Err((None, e)) => try_chan_entry!(self, Err(e), chan), Err((Some(update), e)) => { assert!(chan.get().is_awaiting_monitor_update()); - let _ = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), update); + let _ = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &update); try_chan_entry!(self, Err(e), chan); unreachable!(); }, Ok(res) => res }; - let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update); + let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update); if let Err(e) = handle_monitor_update_res!(self, update_res, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()) { return Err(e); } @@ -4792,7 +4795,7 @@ where let raa_updates = break_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan); htlcs_to_fail = raa_updates.holding_cell_failed_htlcs; - let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), raa_updates.monitor_update); + let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &raa_updates.monitor_update); if was_paused_for_mon_update { assert!(update_res != ChannelMonitorUpdateStatus::Completed); assert!(raa_updates.commitment_update.is_none()); @@ -5097,7 +5100,7 @@ where )); } if let Some((commitment_update, monitor_update)) = commitment_opt { - match self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) { + match self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), &monitor_update) { ChannelMonitorUpdateStatus::Completed => { pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { node_id: chan.get_counterparty_node_id(), @@ -6264,7 +6267,7 @@ pub(crate) fn provided_channel_features(config: &UserConfig) -> ChannelFeatures /// Fetches the set of [`ChannelTypeFeatures`] flags which are provided by or required by /// [`ChannelManager`]. pub(crate) fn provided_channel_type_features(config: &UserConfig) -> ChannelTypeFeatures { - ChannelTypeFeatures::from_counterparty_init(&provided_init_features(config)) + ChannelTypeFeatures::from_init(&provided_init_features(config)) } /// Fetches the set of [`InitFeatures`] flags which are provided by or required by @@ -6285,6 +6288,12 @@ pub fn provided_init_features(_config: &UserConfig) -> InitFeatures { features.set_channel_type_optional(); features.set_scid_privacy_optional(); features.set_zero_conf_optional(); + #[cfg(anchors)] + { // Attributes are not allowed on if expressions on our current MSRV of 1.41. 
+		if _config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx {
+			features.set_anchors_zero_fee_htlc_tx_optional();
+		}
+	}
 	features
 }
@@ -6739,6 +6748,8 @@ where
 			}
 		}
 
+		let per_peer_state = self.per_peer_state.write().unwrap();
+
 		let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap();
 		let claimable_payments = self.claimable_payments.lock().unwrap();
 		let pending_outbound_payments = self.pending_outbound_payments.pending_outbound_payments.lock().unwrap();
@@ -6754,7 +6765,6 @@ where
 			htlc_purposes.push(purpose);
 		}
 
-		let per_peer_state = self.per_peer_state.write().unwrap();
 		(per_peer_state.len() as u64).write(writer)?;
 		for (peer_pubkey, peer_state_mutex) in per_peer_state.iter() {
 			peer_pubkey.write(writer)?;
@@ -7023,7 +7033,9 @@ where
 		let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
 		let mut channel_closures = Vec::new();
 		for _ in 0..channel_count {
-			let mut channel: Channel<<SP::Target as SignerProvider>::Signer> = Channel::read(reader, (&args.entropy_source, &args.signer_provider, best_block_height))?;
+			let mut channel: Channel<<SP::Target as SignerProvider>::Signer> = Channel::read(reader, (
+				&args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
+			))?;
 			let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
 			funding_txo_set.insert(funding_txo.clone());
 			if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
@@ -7116,7 +7128,7 @@ where
 			}
 		}
 
-		for (ref funding_txo, ref mut monitor) in args.channel_monitors.iter_mut() {
+		for (funding_txo, monitor) in args.channel_monitors.iter_mut() {
 			if !funding_txo_set.contains(funding_txo) {
 				log_info!(args.logger, "Broadcasting latest holder commitment transaction for closed channel {}", log_bytes!(funding_txo.to_channel_id()));
 				monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
@@ -8307,6 +8319,42 @@ mod tests {
 		nodes[1].node.handle_update_fee(&unkown_public_key, &update_fee_msg);
 	}
+
+	#[cfg(anchors)]
+	#[test]
+	fn test_anchors_zero_fee_htlc_tx_fallback() {
+		// Tests that if both nodes support anchors, but the remote node does not want to accept
+		// anchor channels at the moment, an error is sent to the local node such that it can retry
+		// the channel without the anchors feature.
+		let chanmon_cfgs = create_chanmon_cfgs(2);
+		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+		let mut anchors_config = test_default_channel_config();
+		anchors_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
+		anchors_config.manually_accept_inbound_channels = true;
+		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config.clone()), Some(anchors_config.clone())]);
+		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+		nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 0, None).unwrap();
+		let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+		assert!(open_channel_msg.channel_type.as_ref().unwrap().supports_anchors_zero_fee_htlc_tx());
+
+		nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+		let events = nodes[1].node.get_and_clear_pending_events();
+		match events[0] {
+			Event::OpenChannelRequest { temporary_channel_id, .. } => {
+				nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
+			}
+			_ => panic!("Unexpected event"),
+		}
+
+		let error_msg = get_err_msg!(nodes[1], nodes[0].node.get_our_node_id());
+		nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &error_msg);
+
+		let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+		assert!(!open_channel_msg.channel_type.unwrap().supports_anchors_zero_fee_htlc_tx());
+
+		check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
+	}
 }
 
 #[cfg(all(any(test, feature = "_test_utils"), feature = "_bench_unstable"))]
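
The per-peer lock-order checks in the `fail_htlc_backwards_internal` hunk replace a single probe of the `per_peer_state` write lock with a sweep over every peer's `Mutex`. Below is a minimal, self-contained sketch of that pattern; `PeerState` and `Manager` are placeholder stand-ins rather than rust-lightning's real types, and the check is gated on `debug_assertions` instead of the crate's `_test_utils` feature so it compiles on its own:

    use std::collections::HashMap;
    use std::sync::{Mutex, RwLock};

    // Stand-in for rust-lightning's per-peer state.
    #[allow(dead_code)]
    struct PeerState { pending_msg_events: Vec<String> }

    struct Manager { per_peer_state: RwLock<HashMap<u8, Mutex<PeerState>>> }

    impl Manager {
        fn fail_htlc_backwards_internal(&self) {
            // Debug-only guard mirroring the diff: no individual peer lock may be
            // held on entry, so no code path can quietly require `forward_htlcs`
            // to be locked after a peer lock. `try_lock` fails if the mutex is held.
            #[cfg(debug_assertions)]
            {
                let per_peer_state = self.per_peer_state.read().unwrap();
                for (_, peer) in per_peer_state.iter() {
                    debug_assert!(peer.try_lock().is_ok());
                }
            }
            // ... real HTLC failure handling would follow here ...
        }
    }

    fn main() {
        let mgr = Manager { per_peer_state: RwLock::new(HashMap::new()) };
        mgr.per_peer_state.write().unwrap()
            .insert(0, Mutex::new(PeerState { pending_msg_events: Vec::new() }));
        mgr.fail_htlc_backwards_internal();
    }

Iterating the map is stricter than the old `try_write` probe: it catches a single held peer lock even when the outer `RwLock` is only read-locked by the caller.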
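Most hunks in this diff are one mechanical change: `chain_monitor.update_channel` now borrows the `ChannelMonitorUpdate` (`&monitor_update`) instead of taking it by value, leaving the caller with ownership after the call. A hedged sketch of that signature shape, using simplified stand-in types rather than the real `chain::Watch` API:

    // Simplified stand-ins, not the real rust-lightning `chain::Watch` types.
    struct OutPoint;
    struct ChannelMonitorUpdate { update_id: u64 }

    #[derive(Debug, PartialEq)]
    enum ChannelMonitorUpdateStatus { Completed }

    struct ChainMonitor;

    impl ChainMonitor {
        // Post-diff shape: the update is borrowed, not consumed.
        fn update_channel(&self, _funding_txo: OutPoint, update: &ChannelMonitorUpdate)
            -> ChannelMonitorUpdateStatus
        {
            let _ = update.update_id; // persisting/applying would happen here
            ChannelMonitorUpdateStatus::Completed
        }
    }

    fn main() {
        let chain_monitor = ChainMonitor;
        let monitor_update = ChannelMonitorUpdate { update_id: 1 };
        let status = chain_monitor.update_channel(OutPoint, &monitor_update);
        assert_eq!(status, ChannelMonitorUpdateStatus::Completed);
        // Because `update_channel` only borrowed the update, the caller can
        // still inspect it, e.g. for logging or a later retry.
        assert_eq!(monitor_update.update_id, 1);
    }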
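The `provided_init_features` hunk gates the new anchors flag behind both `cfg(anchors)` and a handshake-config knob, and its comment notes that the crate's MSRV of 1.41 rejects an attribute placed directly on an `if` expression, hence the wrapping block. A compilable sketch of the same shape, with hypothetical `InitFeatures` and `UserConfig` stand-ins:

    // Hypothetical stand-ins for `InitFeatures` and `UserConfig`.
    #[derive(Default)]
    struct InitFeatures { anchors_zero_fee_htlc_tx_optional: bool }

    struct ChannelHandshakeConfig { negotiate_anchors_zero_fee_htlc_tx: bool }
    struct UserConfig { channel_handshake_config: ChannelHandshakeConfig }

    #[allow(unused_mut)]
    fn provided_init_features(_config: &UserConfig) -> InitFeatures {
        let mut features = InitFeatures::default();
        // The attribute cannot sit directly on the `if` under the stated MSRV,
        // so it sits on a block around it; with the cfg off the whole block
        // vanishes and the leading underscore keeps `_config` warning-free.
        #[cfg(anchors)]
        {
            if _config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx {
                features.anchors_zero_fee_htlc_tx_optional = true;
            }
        }
        features
    }

    fn main() {
        let config = UserConfig {
            channel_handshake_config: ChannelHandshakeConfig {
                negotiate_anchors_zero_fee_htlc_tx: true,
            },
        };
        // Prints `false` unless built with `RUSTFLAGS="--cfg anchors"`.
        println!("{}", provided_init_features(&config).anchors_zero_fee_htlc_tx_optional);
    }

The accompanying `test_anchors_zero_fee_htlc_tx_fallback` test exercises the runtime half of this: when the peer rejects the anchors channel type, the opener retries with a channel type that no longer sets the anchors bit.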