X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=75cd72b8f6897416fd482362263af139201fd459;hb=3c0420c39e8bf2b9b2d654a73fcf43899646859b;hp=560a7e58edafb2f8a703dbbc91688c2dac79f993;hpb=16ee24086cda7a87daf14ad94491215da75a6f9b;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 560a7e58..75cd72b8 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -47,9 +47,7 @@ use crate::ln::channel::{Channel, ChannelPhase, ChannelContext, ChannelError, Ch
use crate::ln::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
#[cfg(any(feature = "_test_utils", test))]
use crate::ln::features::Bolt11InvoiceFeatures;
-use crate::routing::gossip::NetworkGraph;
-use crate::routing::router::{BlindedTail, DefaultRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router};
-use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
+use crate::routing::router::{BlindedTail, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router};
use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htlc_info, create_fwd_pending_htlc_info, decode_incoming_update_add_htlc_onion, InboundOnionErr, NextPacketDetails};
use crate::ln::msgs;
use crate::ln::onion_utils;
@@ -66,7 +64,7 @@ use crate::offers::offer::{DerivedMetadata, Offer, OfferBuilder};
use crate::offers::parse::Bolt12SemanticError;
use crate::offers::refund::{Refund, RefundBuilder};
use crate::onion_message::{Destination, MessageRouter, OffersMessage, OffersMessageHandler, PendingOnionMessage, new_pending_onion_message};
-use crate::sign::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider};
+use crate::sign::{EntropySource, NodeSigner, Recipient, SignerProvider};
use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
use crate::util::wakers::{Future, Notifier};
@@ -75,6 +73,13 @@ use crate::util::string::UntrustedString;
use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
use crate::util::logger::{Level, Logger, WithContext};
use crate::util::errors::APIError;
+#[cfg(not(c_bindings))]
+use {
+ crate::routing::router::DefaultRouter,
+ crate::routing::gossip::NetworkGraph,
+ crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters},
+ crate::sign::KeysManager,
+};

use alloc::collections::{btree_map, BTreeMap};

@@ -1150,7 +1155,7 @@ where
// |
// |__`peer_state`
// |
-// |__`id_to_peer`
+// |__`outpoint_to_peer`
// |
// |__`short_to_chan_info`
// |
@@ -1244,11 +1249,7 @@ where
/// See `ChannelManager` struct-level documentation for lock order requirements.
outbound_scid_aliases: Mutex<HashSet<u64>>,

- /// `channel_id` -> `counterparty_node_id`.
- ///
- /// Only `channel_id`s are allowed as keys in this map, and not `temporary_channel_id`s. As
- /// multiple channels with the same `temporary_channel_id` to different peers can exist,
- /// allowing `temporary_channel_id`s in this map would cause collisions for such channels.
+ /// Channel funding outpoint -> `counterparty_node_id`.
///
/// Note that this map should only be used for `MonitorEvent` handling, to be able to access
/// the corresponding channel for the event, as we only have access to the `channel_id` during
@@ -1266,7 +1267,10 @@ where
/// required to access the channel with the `counterparty_node_id`.
///
/// See `ChannelManager` struct-level documentation for lock order requirements.
- id_to_peer: Mutex<HashMap<ChannelId, PublicKey>>,
+ #[cfg(not(test))]
+ outpoint_to_peer: Mutex<HashMap<OutPoint, PublicKey>>,
+ #[cfg(test)]
+ pub(crate) outpoint_to_peer: Mutex<HashMap<OutPoint, PublicKey>>,

/// SCIDs (and outbound SCID aliases) -> `counterparty_node_id`s and `channel_id`s.
///
@@ -2006,7 +2010,9 @@ macro_rules! handle_error {

macro_rules! update_maps_on_chan_removal {
($self: expr, $channel_context: expr) => {{
- $self.id_to_peer.lock().unwrap().remove(&$channel_context.channel_id());
+ if let Some(outpoint) = $channel_context.get_funding_txo() {
+ $self.outpoint_to_peer.lock().unwrap().remove(&outpoint);
+ }
let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
if let Some(short_id) = $channel_context.get_short_channel_id() {
short_to_chan_info.remove(&short_id);
@@ -2425,7 +2431,7 @@ where
forward_htlcs: Mutex::new(HashMap::new()),
claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: HashMap::new(), pending_claiming_payments: HashMap::new() }),
pending_intercepted_htlcs: Mutex::new(HashMap::new()),
- id_to_peer: Mutex::new(HashMap::new()),
+ outpoint_to_peer: Mutex::new(HashMap::new()),
short_to_chan_info: FairRwLock::new(HashMap::new()),

our_network_pubkey: node_signer.get_node_id(Recipient::Node).unwrap(),
@@ -2576,7 +2582,7 @@ where
fn list_funded_channels_with_filter<Fn: FnMut(&(&ChannelId, &Channel<SP>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
// Allocate our best estimate of the number of channels we have in the `res`
// Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
- // a scid or a scid alias, and the `id_to_peer` shouldn't be used outside
+ // a scid or a scid alias, and the `outpoint_to_peer` shouldn't be used outside
// of the ChannelMonitor handling. Therefore reallocations may still occur, but is
// unlikely as the `short_to_chan_info` map often contains 2 entries for
// the same channel.
@@ -2609,7 +2615,7 @@ where
pub fn list_channels(&self) -> Vec<ChannelDetails> {
// Allocate our best estimate of the number of channels we have in the `res`
// Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
- // a scid or a scid alias, and the `id_to_peer` shouldn't be used outside
+ // a scid or a scid alias, and the `outpoint_to_peer` shouldn't be used outside
// of the ChannelMonitor handling. Therefore reallocations may still occur, but is
// unlikely as the `short_to_chan_info` map often contains 2 entries for
// the same channel.
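
[Illustration, not part of the patch: a minimal, compilable Rust sketch of the keying change above. `OutPoint`, `PublicKey`, and `Manager` here are simplified hypothetical stand-ins, not the real bitcoin/secp256k1/LDK types. The point it demonstrates: once a funding transaction exists, its outpoint is globally unique and can safely key the map consulted during `MonitorEvent` handling, whereas a `temporary_channel_id` is peer-chosen and could collide across peers.]

use std::collections::HashMap;
use std::sync::Mutex;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct OutPoint { txid: [u8; 32], index: u16 } // stand-in for bitcoin's OutPoint

type PublicKey = [u8; 33]; // stand-in for secp256k1::PublicKey

struct Manager { outpoint_to_peer: Mutex<HashMap<OutPoint, PublicKey>> }

impl Manager {
    // Insert once the funding outpoint is known (i.e. at funding_created time);
    // the outpoint, unlike a temporary_channel_id, cannot collide across peers.
    fn note_funded_channel(&self, funding_txo: OutPoint, peer: PublicKey) {
        let mut map = self.outpoint_to_peer.lock().unwrap();
        assert!(map.insert(funding_txo, peer).is_none(), "duplicate funding outpoint");
    }

    // Monitor events carry the funding outpoint, so the counterparty can be
    // recovered directly, without going through a channel-id indirection.
    fn peer_for_monitor_event(&self, funding_txo: &OutPoint) -> Option<PublicKey> {
        self.outpoint_to_peer.lock().unwrap().get(funding_txo).copied()
    }

    // Mirror of `update_maps_on_chan_removal!`: only remove if funding was set.
    fn forget_channel(&self, funding_txo: Option<OutPoint>) {
        if let Some(txo) = funding_txo {
            self.outpoint_to_peer.lock().unwrap().remove(&txo);
        }
    }
}

fn main() {
    let mgr = Manager { outpoint_to_peer: Mutex::new(HashMap::new()) };
    let txo = OutPoint { txid: [7; 32], index: 0 };
    mgr.note_funded_channel(txo, [2; 33]);
    assert_eq!(mgr.peer_for_monitor_event(&txo), Some([2; 33]));
    mgr.forget_channel(Some(txo));
    assert_eq!(mgr.peer_for_monitor_event(&txo), None);
}
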
@@ -3748,9 +3754,10 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; + let funding_txo; let (chan, msg_opt) = match peer_state.channel_by_id.remove(temporary_channel_id) { Some(ChannelPhase::UnfundedOutboundV1(mut chan)) => { - let funding_txo = find_funding_output(&chan, &funding_transaction)?; + funding_txo = find_funding_output(&chan, &funding_transaction)?; let logger = WithChannelContext::from(&self.logger, &chan.context); let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &&logger) @@ -3798,9 +3805,9 @@ where panic!("Generated duplicate funding txid?"); }, hash_map::Entry::Vacant(e) => { - let mut id_to_peer = self.id_to_peer.lock().unwrap(); - if id_to_peer.insert(chan.context.channel_id(), chan.context.get_counterparty_node_id()).is_some() { - panic!("id_to_peer map already contained funding txid, which shouldn't be possible"); + let mut outpoint_to_peer = self.outpoint_to_peer.lock().unwrap(); + if outpoint_to_peer.insert(funding_txo, chan.context.get_counterparty_node_id()).is_some() { + panic!("outpoint_to_peer map already contained funding outpoint, which shouldn't be possible"); } e.insert(ChannelPhase::UnfundedOutboundV1(chan)); } @@ -5610,6 +5617,7 @@ where } let preimage_update = ChannelMonitorUpdate { update_id: CLOSED_CHANNEL_UPDATE_ID, + counterparty_node_id: None, updates: vec![ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage, }], @@ -5911,9 +5919,9 @@ where Some(cp_id) => cp_id.clone(), None => { // TODO: Once we can rely on the counterparty_node_id from the - // monitor event, this and the id_to_peer map should be removed. - let id_to_peer = self.id_to_peer.lock().unwrap(); - match id_to_peer.get(&funding_txo.to_channel_id()) { + // monitor event, this and the outpoint_to_peer map should be removed. + let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap(); + match outpoint_to_peer.get(&funding_txo) { Some(cp_id) => cp_id.clone(), None => return, } @@ -6265,50 +6273,61 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - let (chan, funding_msg_opt, monitor) = + let (mut chan, funding_msg_opt, monitor) = match peer_state.channel_by_id.remove(&msg.temporary_channel_id) { Some(ChannelPhase::UnfundedInboundV1(inbound_chan)) => { let logger = WithChannelContext::from(&self.logger, &inbound_chan.context); match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &&logger) { Ok(res) => res, - Err((mut inbound_chan, err)) => { + Err((inbound_chan, err)) => { // We've already removed this inbound channel from the map in `PeerState` // above so at this point we just need to clean up any lingering entries // concerning this channel as it is safe to do so. - update_maps_on_chan_removal!(self, &inbound_chan.context); - let user_id = inbound_chan.context.get_user_id(); - let shutdown_res = inbound_chan.context.force_shutdown(false); - return Err(MsgHandleErrInternal::from_finish_shutdown(format!("{}", err), - msg.temporary_channel_id, user_id, shutdown_res, None, inbound_chan.context.get_value_satoshis())); + debug_assert!(matches!(err, ChannelError::Close(_))); + // Really we should be returning the channel_id the peer expects based + // on their funding info here, but they're horribly confused anyway, so + // there's not a lot we can do to save them. 
+ return Err(convert_chan_phase_err!(self, err, &mut ChannelPhase::UnfundedInboundV1(inbound_chan), &msg.temporary_channel_id).1); }, } }, - Some(ChannelPhase::Funded(_)) | Some(ChannelPhase::UnfundedOutboundV1(_)) => { - return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)); + Some(mut phase) => { + let err_msg = format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id); + let err = ChannelError::Close(err_msg); + return Err(convert_chan_phase_err!(self, err, &mut phase, &msg.temporary_channel_id).1); }, None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id)) }; - match peer_state.channel_by_id.entry(chan.context.channel_id()) { + let funded_channel_id = chan.context.channel_id(); + + macro_rules! fail_chan { ($err: expr) => { { + // Note that at this point we've filled in the funding outpoint on our + // channel, but its actually in conflict with another channel. Thus, if + // we call `convert_chan_phase_err` immediately (thus calling + // `update_maps_on_chan_removal`), we'll remove the existing channel + // from `outpoint_to_peer`. Thus, we must first unset the funding outpoint + // on the channel. + let err = ChannelError::Close($err.to_owned()); + chan.unset_funding_info(msg.temporary_channel_id); + return Err(convert_chan_phase_err!(self, err, chan, &funded_channel_id, UNFUNDED_CHANNEL).1); + } } } + + match peer_state.channel_by_id.entry(funded_channel_id) { hash_map::Entry::Occupied(_) => { - Err(MsgHandleErrInternal::send_err_msg_no_close( - "Already had channel with the new channel_id".to_owned(), - chan.context.channel_id() - )) + fail_chan!("Already had channel with the new channel_id"); }, hash_map::Entry::Vacant(e) => { - let mut id_to_peer_lock = self.id_to_peer.lock().unwrap(); - match id_to_peer_lock.entry(chan.context.channel_id()) { + let mut outpoint_to_peer_lock = self.outpoint_to_peer.lock().unwrap(); + match outpoint_to_peer_lock.entry(monitor.get_funding_txo().0) { hash_map::Entry::Occupied(_) => { - return Err(MsgHandleErrInternal::send_err_msg_no_close( - "The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(), - chan.context.channel_id())) + fail_chan!("The funding_created message had the same funding_txid as an existing channel - funding is not possible"); }, hash_map::Entry::Vacant(i_e) => { let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor); if let Ok(persist_state) = monitor_res { i_e.insert(chan.context.get_counterparty_node_id()); - mem::drop(id_to_peer_lock); + mem::drop(outpoint_to_peer_lock); // There's no problem signing a counterparty's funding transaction if our monitor // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't @@ -6331,13 +6350,7 @@ where } else { let logger = WithChannelContext::from(&self.logger, &chan.context); log_error!(logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated"); - let channel_id = match funding_msg_opt { - Some(msg) => msg.channel_id, - None => chan.context.channel_id(), - }; - return Err(MsgHandleErrInternal::send_err_msg_no_close( - "The funding_created message had the same 
funding_txid as an existing channel - funding is not possible".to_owned(),
- channel_id));
+ fail_chan!("Duplicate funding outpoint");
}
}
}
@@ -7212,9 +7225,9 @@ where
Some(cp_id) => Some(cp_id),
None => {
// TODO: Once we can rely on the counterparty_node_id from the
- // monitor event, this and the id_to_peer map should be removed.
- let id_to_peer = self.id_to_peer.lock().unwrap();
- id_to_peer.get(&funding_outpoint.to_channel_id()).cloned()
+ // monitor event, this and the outpoint_to_peer map should be removed.
+ let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
+ outpoint_to_peer.get(&funding_outpoint).cloned()
}
};
if let Some(counterparty_node_id) = counterparty_node_id_opt {
@@ -10249,7 +10262,7 @@ where
let channel_count: u64 = Readable::read(reader)?;
let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
let mut funded_peer_channels: HashMap<PublicKey, HashMap<ChannelId, ChannelPhase<SP>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
- let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+ let mut outpoint_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut channel_closures = VecDeque::new();
let mut close_background_events = Vec::new();
@@ -10327,8 +10340,8 @@
if let Some(short_channel_id) = channel.context.get_short_channel_id() {
short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
}
- if channel.context.is_funding_broadcast() {
- id_to_peer.insert(channel.context.channel_id(), channel.context.get_counterparty_node_id());
+ if let Some(funding_txo) = channel.context.get_funding_txo() {
+ outpoint_to_peer.insert(funding_txo, channel.context.get_counterparty_node_id());
}
match funded_peer_channels.entry(channel.context.get_counterparty_node_id()) {
hash_map::Entry::Occupied(mut entry) => {
@@ -10371,6 +10384,7 @@ where
&funding_txo.to_channel_id());
let monitor_update = ChannelMonitorUpdate {
update_id: CLOSED_CHANNEL_UPDATE_ID,
+ counterparty_node_id: None,
updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
};
close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
@@ -10662,7 +10676,7 @@ where
// We only rebuild the pending payments map if we were most recently serialized by
// 0.0.102+
for (_, monitor) in args.channel_monitors.iter() {
- let counterparty_opt = id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id());
+ let counterparty_opt = outpoint_to_peer.get(&monitor.get_funding_txo().0);
if counterparty_opt.is_none() {
let logger = WithChannelMonitor::from(&args.logger, monitor);
for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
@@ -10955,7 +10969,7 @@ where
// without the new monitor persisted - we'll end up right back here on
// restart.
let previous_channel_id = claimable_htlc.prev_hop.outpoint.to_channel_id(); - if let Some(peer_node_id) = id_to_peer.get(&previous_channel_id){ + if let Some(peer_node_id) = outpoint_to_peer.get(&claimable_htlc.prev_hop.outpoint) { let peer_state_mutex = per_peer_state.get(peer_node_id).unwrap(); let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; @@ -11033,7 +11047,7 @@ where forward_htlcs: Mutex::new(forward_htlcs), claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, pending_claiming_payments: pending_claiming_payments.unwrap() }), outbound_scid_aliases: Mutex::new(outbound_scid_aliases), - id_to_peer: Mutex::new(id_to_peer), + outpoint_to_peer: Mutex::new(outpoint_to_peer), short_to_chan_info: FairRwLock::new(short_to_chan_info), fake_scid_rand_bytes: fake_scid_rand_bytes.unwrap(), @@ -11652,8 +11666,8 @@ mod tests { } #[test] - fn test_id_to_peer_coverage() { - // Test that the `ChannelManager:id_to_peer` contains channels which have been assigned + fn test_outpoint_to_peer_coverage() { + // Test that the `ChannelManager:outpoint_to_peer` contains channels which have been assigned // a `channel_id` (i.e. have had the funding tx created), and that they are removed once // the channel is successfully closed. let chanmon_cfgs = create_chanmon_cfgs(2); @@ -11667,42 +11681,42 @@ mod tests { let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel); - let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42); + let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42); let channel_id = ChannelId::from_bytes(tx.txid().to_byte_array()); { - // Ensure that the `id_to_peer` map is empty until either party has received the + // Ensure that the `outpoint_to_peer` map is empty until either party has received the // funding transaction, and have the real `channel_id`. - assert_eq!(nodes[0].node.id_to_peer.lock().unwrap().len(), 0); - assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0); + assert_eq!(nodes[0].node.outpoint_to_peer.lock().unwrap().len(), 0); + assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0); } nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap(); { - // Assert that `nodes[0]`'s `id_to_peer` map is populated with the channel as soon as + // Assert that `nodes[0]`'s `outpoint_to_peer` map is populated with the channel as soon as // as it has the funding transaction. 
- let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap(); + let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap(); assert_eq!(nodes_0_lock.len(), 1); - assert!(nodes_0_lock.contains_key(&channel_id)); + assert!(nodes_0_lock.contains_key(&funding_output)); } - assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0); + assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0); let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg); { - let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap(); + let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap(); assert_eq!(nodes_0_lock.len(), 1); - assert!(nodes_0_lock.contains_key(&channel_id)); + assert!(nodes_0_lock.contains_key(&funding_output)); } expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); { - // Assert that `nodes[1]`'s `id_to_peer` map is populated with the channel as soon as - // as it has the funding transaction. - let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap(); + // Assert that `nodes[1]`'s `outpoint_to_peer` map is populated with the channel as + // soon as it has the funding transaction. + let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap(); assert_eq!(nodes_1_lock.len(), 1); - assert!(nodes_1_lock.contains_key(&channel_id)); + assert!(nodes_1_lock.contains_key(&funding_output)); } check_added_monitors!(nodes[1], 1); let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); @@ -11721,23 +11735,23 @@ mod tests { let closing_signed_node_0 = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_node_0); { - // Assert that the channel is kept in the `id_to_peer` map for both nodes until the + // Assert that the channel is kept in the `outpoint_to_peer` map for both nodes until the // channel can be fully closed by both parties (i.e. no outstanding htlcs exists, the // fee for the closing transaction has been negotiated and the parties has the other // party's signature for the fee negotiated closing transaction.) - let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap(); + let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap(); assert_eq!(nodes_0_lock.len(), 1); - assert!(nodes_0_lock.contains_key(&channel_id)); + assert!(nodes_0_lock.contains_key(&funding_output)); } { // At this stage, `nodes[1]` has proposed a fee for the closing transaction in the // `handle_closing_signed` call above. As `nodes[1]` has not yet received the signature // from `nodes[0]` for the closing transaction with the proposed fee, the channel is - // kept in the `nodes[1]`'s `id_to_peer` map. - let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap(); + // kept in the `nodes[1]`'s `outpoint_to_peer` map. 
+ let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap(); assert_eq!(nodes_1_lock.len(), 1); - assert!(nodes_1_lock.contains_key(&channel_id)); + assert!(nodes_1_lock.contains_key(&funding_output)); } nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id())); @@ -11745,24 +11759,24 @@ mod tests { // `nodes[0]` accepts `nodes[1]`'s proposed fee for the closing transaction, and // therefore has all it needs to fully close the channel (both signatures for the // closing transaction). - // Assert that the channel is removed from `nodes[0]`'s `id_to_peer` map as it can be + // Assert that the channel is removed from `nodes[0]`'s `outpoint_to_peer` map as it can be // fully closed by `nodes[0]`. - assert_eq!(nodes[0].node.id_to_peer.lock().unwrap().len(), 0); + assert_eq!(nodes[0].node.outpoint_to_peer.lock().unwrap().len(), 0); - // Assert that the channel is still in `nodes[1]`'s `id_to_peer` map, as `nodes[1]` + // Assert that the channel is still in `nodes[1]`'s `outpoint_to_peer` map, as `nodes[1]` // doesn't have `nodes[0]`'s signature for the closing transaction yet. - let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap(); + let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap(); assert_eq!(nodes_1_lock.len(), 1); - assert!(nodes_1_lock.contains_key(&channel_id)); + assert!(nodes_1_lock.contains_key(&funding_output)); } let (_nodes_0_update, closing_signed_node_0) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_node_0.unwrap()); { - // Assert that the channel has now been removed from both parties `id_to_peer` map once + // Assert that the channel has now been removed from both parties `outpoint_to_peer` map once // they both have everything required to fully close the channel. - assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0); + assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0); } let (_nodes_1_update, _none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
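
[Illustration, not part of the patch: a hedged sketch of the ordering concern that the `fail_chan!` comment above documents, using toy stand-in types (`OutPoint`, `Channel`, and the map below are hypothetical, not LDK's). With the map keyed by funding outpoint, a rejected duplicate `funding_created` shares its key with the already-accepted channel, so the duplicate's funding info must be unset (as `unset_funding_info` does) before the generic removal path runs, or the existing channel's entry would be evicted.]

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct OutPoint(u32); // toy stand-in for a funding outpoint

struct Channel { funding: Option<OutPoint> }

// Stand-in for `update_maps_on_chan_removal!`: removes by funding outpoint,
// but only if the channel still has its funding info set.
fn update_maps_on_chan_removal(map: &mut HashMap<OutPoint, &'static str>, chan: &Channel) {
    if let Some(txo) = chan.funding {
        map.remove(&txo);
    }
}

fn main() {
    let mut outpoint_to_peer = HashMap::new();
    let existing = Channel { funding: Some(OutPoint(1)) };
    outpoint_to_peer.insert(OutPoint(1), "peer_a");

    // A second funding_created claims the same outpoint and must be failed.
    let mut duplicate = Channel { funding: Some(OutPoint(1)) };

    // Mirroring `fail_chan!`: unset the duplicate's funding info *before*
    // running the removal path, so the existing channel's entry survives.
    duplicate.funding = None; // analogous to `chan.unset_funding_info(...)`
    update_maps_on_chan_removal(&mut outpoint_to_peer, &duplicate);

    assert!(outpoint_to_peer.contains_key(&OutPoint(1)));
    let _ = existing;
}
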