Merge pull request #2895 from TheBlueMatt/2024-02-logging-tweaks
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 4d7be7d92b273e295e0a21d43197e5737d7e1da7..55b2cbae441f5870498ab703787b2772ed9a7310 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -44,6 +44,7 @@ use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, Messa
 // construct one themselves.
 use crate::ln::{inbound_payment, ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
 use crate::ln::channel::{self, Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext};
+pub use crate::ln::channel::{InboundHTLCDetails, InboundHTLCStateDetails, OutboundHTLCDetails, OutboundHTLCStateDetails};
 use crate::ln::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
 #[cfg(any(feature = "_test_utils", test))]
 use crate::ln::features::Bolt11InvoiceFeatures;
@@ -903,7 +904,9 @@ impl <SP: Deref> PeerState<SP> where SP::Target: SignerProvider {
                if require_disconnected && self.is_connected {
                        return false
                }
-               self.channel_by_id.iter().filter(|(_, phase)| matches!(phase, ChannelPhase::Funded(_))).count() == 0
+               !self.channel_by_id.iter().any(|(_, phase)|
+                       matches!(phase, ChannelPhase::Funded(_) | ChannelPhase::UnfundedOutboundV1(_))
+               )
                        && self.monitor_update_blocked_actions.is_empty()
                        && self.in_flight_monitor_updates.is_empty()
        }
@@ -975,6 +978,7 @@ pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
        Arc<DefaultRouter<
                Arc<NetworkGraph<Arc<L>>>,
                Arc<L>,
+               Arc<KeysManager>,
                Arc<RwLock<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>,
                ProbabilisticScoringFeeParameters,
                ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>,
@@ -1005,6 +1009,7 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> =
                &'e DefaultRouter<
                        &'f NetworkGraph<&'g L>,
                        &'g L,
+                       &'c KeysManager,
                        &'h RwLock<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>,
                        ProbabilisticScoringFeeParameters,
                        ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>
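The router type aliases now thread an entropy source (`KeysManager`) through `DefaultRouter`'s generics, matching the removal of the explicit `entropy_source` argument from the `Router` calls later in this diff. A minimal construction sketch follows; the argument order is an assumption mirroring the generic parameters above, and `network_graph`, `logger`, `keys_manager`, and `scorer` are assumed to be initialized elsewhere, so check the actual `DefaultRouter::new` signature in this release before relying on it.

```rust
use std::sync::{Arc, RwLock};
use lightning::routing::router::DefaultRouter;
use lightning::routing::scoring::ProbabilisticScoringFeeParameters;

// Hedged sketch: wiring the entropy source into DefaultRouter.
let router = DefaultRouter::new(
	Arc::clone(&network_graph),
	Arc::clone(&logger),
	Arc::clone(&keys_manager), // assumed position of the new entropy-source parameter
	Arc::new(RwLock::new(scorer)),
	ProbabilisticScoringFeeParameters::default(),
);
```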
@@ -1801,6 +1806,14 @@ pub struct ChannelDetails {
        ///
        /// This field is only `None` for `ChannelDetails` objects serialized prior to LDK 0.0.109.
        pub config: Option<ChannelConfig>,
+       /// Pending inbound HTLCs.
+       ///
+       /// This field is empty for objects serialized with LDK versions prior to 0.0.122.
+       pub pending_inbound_htlcs: Vec<InboundHTLCDetails>,
+       /// Pending outbound HTLCs.
+       ///
+       /// This field is empty for objects serialized with LDK versions prior to 0.0.122.
+       pub pending_outbound_htlcs: Vec<OutboundHTLCDetails>,
 }
 
 impl ChannelDetails {
@@ -1878,6 +1891,8 @@ impl ChannelDetails {
                        inbound_htlc_maximum_msat: context.get_holder_htlc_maximum_msat(),
                        config: Some(context.config()),
                        channel_shutdown_state: Some(context.shutdown_state()),
+                       pending_inbound_htlcs: context.get_pending_inbound_htlc_details(),
+                       pending_outbound_htlcs: context.get_pending_outbound_htlc_details(),
                }
        }
 }
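With the new fields populated from the channel context above, in-flight HTLCs become visible through `ChannelManager::list_channels`. A minimal sketch, assuming an initialized `channel_manager` handle is in scope:

```rust
// Count the in-flight HTLCs exposed by the new ChannelDetails fields.
for details in channel_manager.list_channels() {
	println!(
		"channel {}: {} pending inbound HTLC(s), {} pending outbound HTLC(s)",
		details.channel_id,
		details.pending_inbound_htlcs.len(),
		details.pending_outbound_htlcs.len(),
	);
}
```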
@@ -2152,6 +2167,7 @@ macro_rules! emit_channel_pending_event {
                                counterparty_node_id: $channel.context.get_counterparty_node_id(),
                                user_channel_id: $channel.context.get_user_id(),
                                funding_txo: $channel.context.get_funding_txo().unwrap().into_bitcoin_outpoint(),
+                               channel_type: Some($channel.context.get_channel_type().clone()),
                        }, None));
                        $channel.context.set_channel_pending_event_emitted();
                }
@@ -3005,8 +3021,8 @@ where
        /// the latest local transaction(s). Fails if `channel_id` is unknown to the manager, or if the
        /// `counterparty_node_id` isn't the counterparty of the corresponding channel.
        ///
-       /// You can always get the latest local transaction(s) to broadcast from
-       /// [`ChannelMonitor::get_latest_holder_commitment_txn`].
+       /// You can always broadcast the latest local transaction(s) via
+       /// [`ChannelMonitor::broadcast_latest_holder_commitment_txn`].
        pub fn force_close_without_broadcasting_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey)
        -> Result<(), APIError> {
                self.force_close_sending_error(channel_id, counterparty_node_id, false)
@@ -4817,10 +4833,6 @@ where
 
                // If the feerate has decreased by less than half, don't bother
                if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() {
-                       if new_feerate != chan.context.get_feerate_sat_per_1000_weight() {
-                               log_trace!(logger, "Channel {} does not qualify for a feerate change from {} to {}.",
-                               chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
-                       }
                        return NotifyOption::SkipPersistNoEvents;
                }
                if !chan.context.is_live() {
@@ -5951,7 +5963,7 @@ where
                                // TODO: Once we can rely on the counterparty_node_id from the
                                // monitor event, this and the outpoint_to_peer map should be removed.
                                let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
-                               match outpoint_to_peer.get(&funding_txo) {
+                               match outpoint_to_peer.get(funding_txo) {
                                        Some(cp_id) => cp_id.clone(),
                                        None => return,
                                }
@@ -5964,7 +5976,7 @@ where
                peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                let channel =
-                       if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) {
+                       if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(channel_id) {
                                chan
                        } else {
                                let update_actions = peer_state.monitor_update_blocked_actions
@@ -6178,12 +6190,14 @@ where
        fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
                // Note that the ChannelManager is NOT re-persisted on disk after this, so any changes are
                // likely to be lost on restart!
-               if msg.chain_hash != self.chain_hash {
-                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
+               if msg.common_fields.chain_hash != self.chain_hash {
+                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(),
+                                msg.common_fields.temporary_channel_id.clone()));
                }
 
                if !self.default_configuration.accept_inbound_channels {
-                       return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(), msg.temporary_channel_id.clone()));
+                       return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(),
+                                msg.common_fields.temporary_channel_id.clone()));
                }
 
                // Get the number of peers with channels, but without funded ones. We don't care too much
@@ -6196,7 +6210,9 @@ where
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                    .ok_or_else(|| {
                                debug_assert!(false);
-                               MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id.clone())
+                               MsgHandleErrInternal::send_err_msg_no_close(
+                                       format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
+                                       msg.common_fields.temporary_channel_id.clone())
                        })?;
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
@@ -6210,20 +6226,22 @@ where
                {
                        return Err(MsgHandleErrInternal::send_err_msg_no_close(
                                "Have too many peers with unfunded channels, not accepting new ones".to_owned(),
-                               msg.temporary_channel_id.clone()));
+                               msg.common_fields.temporary_channel_id.clone()));
                }
 
                let best_block_height = self.best_block.read().unwrap().height();
                if Self::unfunded_channel_count(peer_state, best_block_height) >= MAX_UNFUNDED_CHANS_PER_PEER {
                        return Err(MsgHandleErrInternal::send_err_msg_no_close(
                                format!("Refusing more than {} unfunded channels.", MAX_UNFUNDED_CHANS_PER_PEER),
-                               msg.temporary_channel_id.clone()));
+                               msg.common_fields.temporary_channel_id.clone()));
                }
 
-               let channel_id = msg.temporary_channel_id;
+               let channel_id = msg.common_fields.temporary_channel_id;
                let channel_exists = peer_state.has_channel(&channel_id);
                if channel_exists {
-                       return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone()));
+                       return Err(MsgHandleErrInternal::send_err_msg_no_close(
+                               "temporary_channel_id collision for the same peer!".to_owned(),
+                               msg.common_fields.temporary_channel_id.clone()));
                }
 
                // If we're doing manual acceptance checks on the channel, then defer creation until we're sure we want to accept.
@@ -6231,13 +6249,13 @@ where
                        let channel_type = channel::channel_type_from_open_channel(
                                        &msg, &peer_state.latest_features, &self.channel_type_features()
                                ).map_err(|e|
-                                       MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id)
+                                       MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id)
                                )?;
                        let mut pending_events = self.pending_events.lock().unwrap();
                        pending_events.push_back((events::Event::OpenChannelRequest {
-                               temporary_channel_id: msg.temporary_channel_id.clone(),
+                               temporary_channel_id: msg.common_fields.temporary_channel_id.clone(),
                                counterparty_node_id: counterparty_node_id.clone(),
-                               funding_satoshis: msg.funding_satoshis,
+                               funding_satoshis: msg.common_fields.funding_satoshis,
                                push_msat: msg.push_msat,
                                channel_type,
                        }, None));
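For nodes running with manual channel acceptance, handling this event is unchanged; only the message-side field access moved under `common_fields`. A hedged sketch of the acceptance path, assuming a `channel_manager` handle, an arbitrary `user_channel_id` of 42, and a purely illustrative 50,000 sat threshold:

```rust
use lightning::events::Event;

match event {
	Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, funding_satoshis, .. } => {
		if funding_satoshis >= 50_000 {
			channel_manager
				.accept_inbound_channel(&temporary_channel_id, &counterparty_node_id, 42)
				.expect("channel should still be awaiting acceptance");
		} else {
			// Rejecting is done by force-closing without broadcasting anything.
			let _ = channel_manager.force_close_without_broadcasting_txn(
				&temporary_channel_id, &counterparty_node_id);
		}
	},
	_ => {},
}
```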
@@ -6257,17 +6275,21 @@ where
                        &self.default_configuration, best_block_height, &self.logger, /*is_0conf=*/false)
                {
                        Err(e) => {
-                               return Err(MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id));
+                               return Err(MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id));
                        },
                        Ok(res) => res
                };
 
                let channel_type = channel.context.get_channel_type();
                if channel_type.requires_zero_conf() {
-                       return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone()));
+                       return Err(MsgHandleErrInternal::send_err_msg_no_close(
+                               "No zero confirmation channels accepted".to_owned(),
+                               msg.common_fields.temporary_channel_id.clone()));
                }
                if channel_type.requires_anchors_zero_fee_htlc_tx() {
-                       return Err(MsgHandleErrInternal::send_err_msg_no_close("No channels with anchor outputs accepted".to_owned(), msg.temporary_channel_id.clone()));
+                       return Err(MsgHandleErrInternal::send_err_msg_no_close(
+                               "No channels with anchor outputs accepted".to_owned(),
+                               msg.common_fields.temporary_channel_id.clone()));
                }
 
                let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
@@ -6289,11 +6311,11 @@ where
                        let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                                .ok_or_else(|| {
                                        debug_assert!(false);
-                                       MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id)
+                                       MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id)
                                })?;
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
-                       match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
+                       match peer_state.channel_by_id.entry(msg.common_fields.temporary_channel_id) {
                                hash_map::Entry::Occupied(mut phase) => {
                                        match phase.get_mut() {
                                                ChannelPhase::UnfundedOutboundV1(chan) => {
@@ -6301,16 +6323,16 @@ where
                                                        (chan.context.get_value_satoshis(), chan.context.get_funding_redeemscript().to_v0_p2wsh(), chan.context.get_user_id())
                                                },
                                                _ => {
-                                                       return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected accept_channel message from peer with counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id));
+                                                       return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected accept_channel message from peer with counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id));
                                                }
                                        }
                                },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
+                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id))
                        }
                };
                let mut pending_events = self.pending_events.lock().unwrap();
                pending_events.push_back((events::Event::FundingGenerationReady {
-                       temporary_channel_id: msg.temporary_channel_id,
+                       temporary_channel_id: msg.common_fields.temporary_channel_id,
                        counterparty_node_id: *counterparty_node_id,
                        channel_value_satoshis: value,
                        output_script,
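The `FundingGenerationReady` flow itself is untouched apart from where the temporary channel id is read from. A minimal sketch of the usual response, where `build_funding_tx` is a hypothetical wallet helper producing a `bitcoin::Transaction` with one output of `channel_value_satoshis` paying to `output_script`:

```rust
use lightning::events::Event;

match event {
	Event::FundingGenerationReady {
		temporary_channel_id, counterparty_node_id, channel_value_satoshis, output_script, ..
	} => {
		// `build_funding_tx` is a placeholder for wallet-side transaction construction.
		let funding_tx = build_funding_tx(channel_value_satoshis, &output_script);
		channel_manager
			.funding_transaction_generated(&temporary_channel_id, &counterparty_node_id, funding_tx)
			.expect("channel should still be awaiting funding");
	},
	_ => {},
}
```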
@@ -7968,7 +7990,6 @@ where
        /// Errors if the `MessageRouter` errors or returns an empty `Vec`.
        fn create_blinded_path(&self) -> Result<BlindedPath, ()> {
                let recipient = self.get_our_node_id();
-               let entropy_source = self.entropy_source.deref();
                let secp_ctx = &self.secp_ctx;
 
                let peers = self.per_peer_state.read().unwrap()
@@ -7978,7 +7999,7 @@ where
                        .collect::<Vec<_>>();
 
                self.router
-                       .create_blinded_paths(recipient, peers, entropy_source, secp_ctx)
+                       .create_blinded_paths(recipient, peers, secp_ctx)
                        .and_then(|paths| paths.into_iter().next().ok_or(()))
        }
 
@@ -7987,7 +8008,6 @@ where
        fn create_blinded_payment_paths(
                &self, amount_msats: u64, payment_secret: PaymentSecret
        ) -> Result<Vec<(BlindedPayInfo, BlindedPath)>, ()> {
-               let entropy_source = self.entropy_source.deref();
                let secp_ctx = &self.secp_ctx;
 
                let first_hops = self.list_usable_channels();
@@ -8002,7 +8022,7 @@ where
                        },
                };
                self.router.create_blinded_payment_paths(
-                       payee_node_id, first_hops, payee_tlvs, amount_msats, entropy_source, secp_ctx
+                       payee_node_id, first_hops, payee_tlvs, amount_msats, secp_ctx
                )
        }
 
@@ -8699,7 +8719,7 @@ where
        fn handle_open_channel_v2(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannelV2) {
                let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
                        "Dual-funded channels not supported".to_owned(),
-                        msg.temporary_channel_id.clone())), *counterparty_node_id);
+                        msg.common_fields.temporary_channel_id.clone())), *counterparty_node_id);
        }
 
        fn handle_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) {
@@ -8715,7 +8735,7 @@ where
        fn handle_accept_channel_v2(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannelV2) {
                let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
                        "Dual-funded channels not supported".to_owned(),
-                        msg.temporary_channel_id.clone())), *counterparty_node_id);
+                        msg.common_fields.temporary_channel_id.clone())), *counterparty_node_id);
        }
 
        fn handle_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) {
@@ -8910,10 +8930,12 @@ where
                                                        }
                                                        &mut chan.context
                                                },
-                                               // Unfunded channels will always be removed.
-                                               ChannelPhase::UnfundedOutboundV1(chan) => {
-                                                       &mut chan.context
+                                               // We retain UnfundedOutboundV1 channels for some time in case the
+                                               // peer unexpectedly disconnects and intends to reconnect.
+                                               ChannelPhase::UnfundedOutboundV1(_) => {
+                                                       return true;
                                                },
+                                               // Unfunded inbound channels will always be removed.
                                                ChannelPhase::UnfundedInboundV1(chan) => {
                                                        &mut chan.context
                                                },
@@ -9052,15 +9074,31 @@ where
                                let peer_state = &mut *peer_state_lock;
                                let pending_msg_events = &mut peer_state.pending_msg_events;
 
-                               peer_state.channel_by_id.iter_mut().filter_map(|(_, phase)|
-                                       if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
-                               ).for_each(|chan| {
-                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
-                                       pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
-                                               node_id: chan.context.get_counterparty_node_id(),
-                                               msg: chan.get_channel_reestablish(&&logger),
-                                       });
-                               });
+                               for (_, phase) in peer_state.channel_by_id.iter_mut() {
+                                       match phase {
+                                               ChannelPhase::Funded(chan) => {
+                                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                                       pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
+                                                               node_id: chan.context.get_counterparty_node_id(),
+                                                               msg: chan.get_channel_reestablish(&&logger),
+                                                       });
+                                               }
+
+                                               ChannelPhase::UnfundedOutboundV1(chan) => {
+                                                       pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
+                                                               node_id: chan.context.get_counterparty_node_id(),
+                                                               msg: chan.get_open_channel(self.chain_hash),
+                                                       });
+                                               }
+
+                                               ChannelPhase::UnfundedInboundV1(_) => {
+                                                       // Unfunded inbound channels are cleared from the channel maps upon
+                                                       // disconnecting a peer, and are never persisted, so they cannot be
+                                                       // recovered after a crash. Therefore, none should exist at this point.
+                                                       debug_assert!(false);
+                                               }
+                                       }
+                               }
                        }
 
                        return NotifyOption::SkipPersistHandleEvents;
@@ -9459,6 +9497,8 @@ impl Writeable for ChannelDetails {
                        (37, user_channel_id_high_opt, option),
                        (39, self.feerate_sat_per_1000_weight, option),
                        (41, self.channel_shutdown_state, option),
+                       (43, self.pending_inbound_htlcs, optional_vec),
+                       (45, self.pending_outbound_htlcs, optional_vec),
                });
                Ok(())
        }
@@ -9497,6 +9537,8 @@ impl Readable for ChannelDetails {
                        (37, user_channel_id_high_opt, option),
                        (39, feerate_sat_per_1000_weight, option),
                        (41, channel_shutdown_state, option),
+                       (43, pending_inbound_htlcs, optional_vec),
+                       (45, pending_outbound_htlcs, optional_vec),
                });
 
                // `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with
@@ -9533,6 +9575,8 @@ impl Readable for ChannelDetails {
                        inbound_htlc_maximum_msat,
                        feerate_sat_per_1000_weight,
                        channel_shutdown_state,
+                       pending_inbound_htlcs: pending_inbound_htlcs.unwrap_or(Vec::new()),
+                       pending_outbound_htlcs: pending_outbound_htlcs.unwrap_or(Vec::new()),
                })
        }
 }
@@ -11085,7 +11129,7 @@ where
                                                        downstream_counterparty_and_funding_outpoint:
                                                                Some((blocked_node_id, _blocked_channel_outpoint, blocked_channel_id, blocking_action)), ..
                                                } = action {
-                                                       if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) {
+                                                       if let Some(blocked_peer_state) = per_peer_state.get(blocked_node_id) {
                                                                log_trace!(logger,
                                                                        "Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
                                                                        blocked_channel_id);
@@ -11862,8 +11906,8 @@ mod tests {
                }
                let (_nodes_1_update, _none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
 
-               check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
-               check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
+               check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
+               check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
        }
 
        fn check_not_connected_to_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {
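The test changes above reflect the split of the old `ClosureReason::CooperativeClosure` into locally- and counterparty-initiated variants. A hedged sketch of distinguishing them in an event handler:

```rust
use lightning::events::{ClosureReason, Event};

match event {
	Event::ChannelClosed { channel_id, reason, .. } => match reason {
		ClosureReason::LocallyInitiatedCooperativeClosure =>
			println!("we initiated the cooperative close of {}", channel_id),
		ClosureReason::CounterpartyInitiatedCooperativeClosure =>
			println!("peer initiated the cooperative close of {}", channel_id),
		other => println!("channel {} closed: {:?}", channel_id, other),
	},
	_ => {},
}
```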
@@ -11995,14 +12039,15 @@ mod tests {
                                check_added_monitors!(nodes[0], 1);
                                expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
                        }
-                       open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
+                       open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
                }
 
                // A MAX_UNFUNDED_CHANS_PER_PEER + 1 channel will be summarily rejected
-               open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
+               open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(
+                       &nodes[0].keys_manager);
                nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
                assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
-                       open_channel_msg.temporary_channel_id);
+                       open_channel_msg.common_fields.temporary_channel_id);
 
                // Further, because all of our channels with nodes[0] are inbound, and none of them funded,
                // it doesn't count as a "protected" peer, i.e. it counts towards the MAX_NO_CHANNEL_PEERS
@@ -12050,11 +12095,11 @@ mod tests {
                for i in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
                        nodes[1].node.handle_open_channel(&peer_pks[i], &open_channel_msg);
                        get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, peer_pks[i]);
-                       open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
+                       open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
                }
                nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
                assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
-                       open_channel_msg.temporary_channel_id);
+                       open_channel_msg.common_fields.temporary_channel_id);
 
                // Of course, however, outbound channels are always allowed
                nodes[1].node.create_channel(last_random_pk, 100_000, 0, 42, None, None).unwrap();
@@ -12090,14 +12135,14 @@ mod tests {
                for _ in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
                        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
                        get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
-                       open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
+                       open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
                }
 
                // Once we have MAX_UNFUNDED_CHANS_PER_PEER unfunded channels, new inbound channels will be
                // rejected.
                nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
                assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
-                       open_channel_msg.temporary_channel_id);
+                       open_channel_msg.common_fields.temporary_channel_id);
 
                // but we can still open an outbound channel.
                nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
@@ -12106,7 +12151,7 @@ mod tests {
                // but even with such an outbound channel, additional inbound channels will still fail.
                nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
                assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
-                       open_channel_msg.temporary_channel_id);
+                       open_channel_msg.common_fields.temporary_channel_id);
        }
 
        #[test]
@@ -12142,7 +12187,7 @@ mod tests {
                                _ => panic!("Unexpected event"),
                        }
                        get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, random_pk);
-                       open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
+                       open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
                }
 
                // If we try to accept a channel from another peer non-0conf it will fail.
@@ -12164,7 +12209,7 @@ mod tests {
                        _ => panic!("Unexpected event"),
                }
                assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
-                       open_channel_msg.temporary_channel_id);
+                       open_channel_msg.common_fields.temporary_channel_id);
 
                // ...however if we accept the same channel 0conf it should work just fine.
                nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
@@ -12309,7 +12354,7 @@ mod tests {
 
                nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 0, None, None).unwrap();
                let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
-               assert!(open_channel_msg.channel_type.as_ref().unwrap().supports_anchors_zero_fee_htlc_tx());
+               assert!(open_channel_msg.common_fields.channel_type.as_ref().unwrap().supports_anchors_zero_fee_htlc_tx());
 
                nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
                let events = nodes[1].node.get_and_clear_pending_events();
@@ -12324,7 +12369,7 @@ mod tests {
                nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &error_msg);
 
                let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
-               assert!(!open_channel_msg.channel_type.unwrap().supports_anchors_zero_fee_htlc_tx());
+               assert!(!open_channel_msg.common_fields.channel_type.unwrap().supports_anchors_zero_fee_htlc_tx());
 
                // Since nodes[1] should not have accepted the channel, it should
                // not have generated any events.