Move `Channel::channel_id` and some other methods to `ChannelContext` impl
author     Duncan Dean <git@dunxen.dev>
           Wed, 7 Jun 2023 10:15:24 +0000 (12:15 +0200)
committer  Duncan Dean <git@dunxen.dev>
           Wed, 14 Jun 2023 11:42:26 +0000 (13:42 +0200)
This is one of a series of commits that move methods in chunks so that
they are easily reviewable in diffs. Unfortunately the commits are not
purely move-only, as field accesses need to be updated for things to
compile, but those updates should be quite clear.

Due to the above change, this commit also uses the `context` field
wherever it is needed for compilation and for the tests to pass.
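
As a quick illustration of the pattern (a minimal, self-contained sketch
with a simplified field set, not the actual LDK definitions): accessors
move onto `ChannelContext`, and `Channel` call sites reach them through
the `context` field, exactly as the `self.channel_id()` ->
`self.context.channel_id()` rewrites in the diff below.

    struct ChannelContext {
        channel_id: [u8; 32],
        user_id: u128,
    }

    impl ChannelContext {
        // Accessors now live on the context type...
        pub fn channel_id(&self) -> [u8; 32] { self.channel_id }
        pub fn get_user_id(&self) -> u128 { self.user_id }
    }

    struct Channel {
        context: ChannelContext,
    }

    fn main() {
        let chan = Channel {
            context: ChannelContext { channel_id: [0u8; 32], user_id: 42 },
        };
        // ...so call sites change from `chan.channel_id()` to:
        assert_eq!(chan.context.channel_id(), [0u8; 32]);
        assert_eq!(chan.context.get_user_id(), 42);
    }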

lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/onion_route_tests.rs
lightning/src/ln/payment_tests.rs

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index b1cd603f021d61564af75f2aa8216452973feb0a..74ce190a45dd323aeddf58f8321e6b0d89cc90db 100644
@@ -830,6 +830,230 @@ impl<Signer: ChannelSigner> ChannelContext<Signer> {
        pub fn is_live(&self) -> bool {
                self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0)
        }
+
+       // Public utilities:
+
+       pub fn channel_id(&self) -> [u8; 32] {
+               self.channel_id
+       }
+
+       // Return the `temporary_channel_id` used during channel establishment.
+       //
+       // Will return `None` for channels created prior to LDK version 0.0.115.
+       pub fn temporary_channel_id(&self) -> Option<[u8; 32]> {
+               self.temporary_channel_id
+       }
+
+       pub fn minimum_depth(&self) -> Option<u32> {
+               self.minimum_depth
+       }
+
+       /// Gets the "user_id" value passed into the construction of this channel. It has no special
+       /// meaning and exists only to allow users to have a persistent identifier of a channel.
+       pub fn get_user_id(&self) -> u128 {
+               self.user_id
+       }
+
+       /// Gets the channel's type
+       pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
+               &self.channel_type
+       }
+
+       /// Guaranteed to be Some after both ChannelReady messages have been exchanged (and, thus,
+       /// is_usable() returns true).
+       /// Allowed in any state (including after shutdown)
+       pub fn get_short_channel_id(&self) -> Option<u64> {
+               self.short_channel_id
+       }
+
+       /// Allowed in any state (including after shutdown)
+       pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
+               self.latest_inbound_scid_alias
+       }
+
+       /// Allowed in any state (including after shutdown)
+       pub fn outbound_scid_alias(&self) -> u64 {
+               self.outbound_scid_alias
+       }
+
+       /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
+       /// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases.
+       pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
+               assert_eq!(self.outbound_scid_alias, 0);
+               self.outbound_scid_alias = outbound_scid_alias;
+       }
+
+       /// Returns the funding_txo we either got from our peer, or were given by
+       /// get_outbound_funding_created.
+       pub fn get_funding_txo(&self) -> Option<OutPoint> {
+               self.channel_transaction_parameters.funding_outpoint
+       }
+
+       /// Returns the block hash in which our funding transaction was confirmed.
+       pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
+               self.funding_tx_confirmed_in
+       }
+
+       /// Returns the current number of confirmations on the funding transaction.
+       pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
+               if self.funding_tx_confirmation_height == 0 {
+                       // We either haven't seen any confirmation yet, or observed a reorg.
+                       return 0;
+               }
+
+               height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
+       }
+
+       fn get_holder_selected_contest_delay(&self) -> u16 {
+               self.channel_transaction_parameters.holder_selected_contest_delay
+       }
+
+       fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
+               &self.channel_transaction_parameters.holder_pubkeys
+       }
+
+       pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
+               self.channel_transaction_parameters.counterparty_parameters
+                       .as_ref().map(|params| params.selected_contest_delay)
+       }
+
+       fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
+               &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
+       }
+
+       /// Allowed in any state (including after shutdown)
+       pub fn get_counterparty_node_id(&self) -> PublicKey {
+               self.counterparty_node_id
+       }
+
+       /// Allowed in any state (including after shutdown)
+       pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
+               self.holder_htlc_minimum_msat
+       }
+
+       /// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
+       pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
+               self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
+       }
+
+       /// Allowed in any state (including after shutdown)
+       pub fn get_announced_htlc_max_msat(&self) -> u64 {
+               return cmp::min(
+                       // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
+                       // to use full capacity. This is an effort to reduce routing failures, because in many cases
+                       // the channel might have been used to route very small values (either by honest users or as DoS).
+                       self.channel_value_satoshis * 1000 * 9 / 10,
+
+                       self.counterparty_max_htlc_value_in_flight_msat
+               );
+       }
+
+       /// Allowed in any state (including after shutdown)
+       pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
+               self.counterparty_htlc_minimum_msat
+       }
+
+       /// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
+       pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
+               self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
+       }
+
+       fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
+               self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
+                       let holder_reserve = self.holder_selected_channel_reserve_satoshis;
+                       cmp::min(
+                               (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
+                               party_max_htlc_value_in_flight_msat
+                       )
+               })
+       }
+
+       pub fn get_value_satoshis(&self) -> u64 {
+               self.channel_value_satoshis
+       }
+
+       pub fn get_fee_proportional_millionths(&self) -> u32 {
+               self.config.options.forwarding_fee_proportional_millionths
+       }
+
+       pub fn get_cltv_expiry_delta(&self) -> u16 {
+               cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
+       }
+
+       pub fn get_max_dust_htlc_exposure_msat(&self) -> u64 {
+               self.config.options.max_dust_htlc_exposure_msat
+       }
+
+       /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
+       pub fn prev_config(&self) -> Option<ChannelConfig> {
+               self.prev_config.map(|prev_config| prev_config.0)
+       }
+
+       // Checks whether we should emit a `ChannelPending` event.
+       pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
+               self.is_funding_initiated() && !self.channel_pending_event_emitted
+       }
+
+       // Returns whether we already emitted a `ChannelPending` event.
+       pub(crate) fn channel_pending_event_emitted(&self) -> bool {
+               self.channel_pending_event_emitted
+       }
+
+       // Remembers that we already emitted a `ChannelPending` event.
+       pub(crate) fn set_channel_pending_event_emitted(&mut self) {
+               self.channel_pending_event_emitted = true;
+       }
+
+       // Checks whether we should emit a `ChannelReady` event.
+       pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
+               self.is_usable() && !self.channel_ready_event_emitted
+       }
+
+       // Remembers that we already emitted a `ChannelReady` event.
+       pub(crate) fn set_channel_ready_event_emitted(&mut self) {
+               self.channel_ready_event_emitted = true;
+       }
+
+       /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
+       /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
+       /// no longer be considered when forwarding HTLCs.
+       pub fn maybe_expire_prev_config(&mut self) {
+               if self.prev_config.is_none() {
+                       return;
+               }
+               let prev_config = self.prev_config.as_mut().unwrap();
+               prev_config.1 += 1;
+               if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
+                       self.prev_config = None;
+               }
+       }
+
+       /// Returns the current [`ChannelConfig`] applied to the channel.
+       pub fn config(&self) -> ChannelConfig {
+               self.config.options
+       }
+
+       /// Updates the channel's config. Returns a bool indicating whether the config update
+       /// resulted in a new ChannelUpdate message.
+       pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
+               let did_channel_update =
+                       self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
+                       self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
+                       self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
+               if did_channel_update {
+                       self.prev_config = Some((self.config.options, 0));
+                       // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
+                       // policy change to propagate throughout the network.
+                       self.update_time_counter += 1;
+               }
+               self.config.options = *config;
+               did_channel_update
+       }
+
+       /// Returns true if funding_created was sent/received.
+       pub fn is_funding_initiated(&self) -> bool {
+               self.channel_state >= ChannelState::FundingSent as u32
+       }
 }
 
 // Internal utility functions for channels
@@ -1634,7 +1858,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
                log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
                        commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
-                       get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.context.is_outbound()),
+                       get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound()),
                        log_bytes!(self.context.channel_id), if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
 
                macro_rules! get_htlc_in_commitment {
@@ -1786,9 +2010,9 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                let mut value_to_a = if local { value_to_self } else { value_to_remote };
                let mut value_to_b = if local { value_to_remote } else { value_to_self };
                let (funding_pubkey_a, funding_pubkey_b) = if local {
-                       (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
+                       (self.context.get_holder_pubkeys().funding_pubkey, self.context.get_counterparty_pubkeys().funding_pubkey)
                } else {
-                       (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
+                       (self.context.get_counterparty_pubkeys().funding_pubkey, self.context.get_holder_pubkeys().funding_pubkey)
                };
 
                if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
@@ -1922,9 +2146,9 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        /// TODO Some magic rust shit to compile-time check this?
        fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
                let per_commitment_point = self.context.holder_signer.get_per_commitment_point(commitment_number, &self.context.secp_ctx);
-               let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
-               let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
-               let counterparty_pubkeys = self.get_counterparty_pubkeys();
+               let delayed_payment_base = &self.context.get_holder_pubkeys().delayed_payment_basepoint;
+               let htlc_basepoint = &self.context.get_holder_pubkeys().htlc_basepoint;
+               let counterparty_pubkeys = self.context.get_counterparty_pubkeys();
 
                TxCreationKeys::derive_new(&self.context.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
        }
@@ -1936,9 +2160,9 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        fn build_remote_transaction_keys(&self) -> TxCreationKeys {
                //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
                //may see payments to it!
-               let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
-               let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
-               let counterparty_pubkeys = self.get_counterparty_pubkeys();
+               let revocation_basepoint = &self.context.get_holder_pubkeys().revocation_basepoint;
+               let htlc_basepoint = &self.context.get_holder_pubkeys().htlc_basepoint;
+               let counterparty_pubkeys = self.context.get_counterparty_pubkeys();
 
                TxCreationKeys::derive_new(&self.context.secp_ctx, &self.context.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
        }
@@ -1947,7 +2171,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        /// pays to get_funding_redeemscript().to_v0_p2wsh()).
        /// Panics if called before accept_channel/new_from_req
        pub fn get_funding_redeemscript(&self) -> Script {
-               make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
+               make_funding_redeemscript(&self.context.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
        }
 
        /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
@@ -1998,7 +2222,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                        InboundHTLCState::LocalRemoved(ref reason) => {
                                                if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
                                                } else {
-                                                       log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id()));
+                                                       log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id()));
                                                        debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
                                                }
                                                return UpdateFulfillFetch::DuplicateClaim {};
@@ -2051,7 +2275,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                        },
                                        &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
                                                if htlc_id_arg == htlc_id {
-                                                       log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", log_bytes!(self.channel_id()));
+                                                       log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", log_bytes!(self.context.channel_id()));
                                                        // TODO: We may actually be able to switch to a fulfill here, though its
                                                        // rare enough it may not be worth the complexity burden.
                                                        debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
@@ -2061,7 +2285,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                        _ => {}
                                }
                        }
-                       log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", log_bytes!(self.channel_id()), self.context.channel_state);
+                       log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", log_bytes!(self.context.channel_id()), self.context.channel_state);
                        self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
                                payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
                        });
@@ -2087,7 +2311,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        monitor_update,
                        htlc_value_msat,
                        msg: Some(msgs::UpdateFulfillHTLC {
-                               channel_id: self.channel_id(),
+                               channel_id: self.context.channel_id(),
                                htlc_id: htlc_id_arg,
                                payment_preimage: payment_preimage_arg,
                        }),
@@ -2231,7 +2455,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                        _ => {}
                                }
                        }
-                       log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, log_bytes!(self.channel_id()));
+                       log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, log_bytes!(self.context.channel_id()));
                        self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
                                htlc_id: htlc_id_arg,
                                err_packet,
@@ -2239,14 +2463,14 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        return Ok(None);
                }
 
-               log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, log_bytes!(self.channel_id()));
+               log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, log_bytes!(self.context.channel_id()));
                {
                        let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
                        htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
                }
 
                Ok(Some(msgs::UpdateFailHTLC {
-                       channel_id: self.channel_id(),
+                       channel_id: self.context.channel_id(),
                        htlc_id: htlc_id_arg,
                        reason: err_packet
                }))
@@ -2396,7 +2620,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
                                log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.counterparty_funding_pubkey().serialize()),
                                encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
-                               encode::serialize_hex(&funding_script), log_bytes!(self.channel_id()));
+                               encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id()));
                        secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
                }
 
@@ -2406,7 +2630,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
                let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
                log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
-                       log_bytes!(self.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
+                       log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
 
                let counterparty_signature = self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
                                .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0;
@@ -2416,7 +2640,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        }
 
        fn counterparty_funding_pubkey(&self) -> &PublicKey {
-               &self.get_counterparty_pubkeys().funding_pubkey
+               &self.context.get_counterparty_pubkeys().funding_pubkey
        }
 
        pub fn funding_created<SP: Deref, L: Deref>(
@@ -2467,7 +2691,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        initial_commitment_tx,
                        msg.signature,
                        Vec::new(),
-                       &self.get_holder_pubkeys().funding_pubkey,
+                       &self.context.get_holder_pubkeys().funding_pubkey,
                        self.counterparty_funding_pubkey()
                );
 
@@ -2478,12 +2702,12 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
                let funding_redeemscript = self.get_funding_redeemscript();
                let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
-               let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
+               let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
                let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
                let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
                monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
                let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
-                                                         shutdown_script, self.get_holder_selected_contest_delay(),
+                                                         shutdown_script, self.context.get_holder_selected_contest_delay(),
                                                          &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
                                                          &self.context.channel_transaction_parameters,
                                                          funding_redeemscript.clone(), self.context.channel_value_satoshis,
@@ -2497,7 +2721,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                self.context.cur_counterparty_commitment_transaction_number -= 1;
                self.context.cur_holder_commitment_transaction_number -= 1;
 
-               log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.channel_id()));
+               log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.context.channel_id()));
 
                let need_channel_ready = self.check_get_channel_ready(0).is_some();
                self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
@@ -2539,7 +2763,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
 
                log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
-                       log_bytes!(self.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
+                       log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
 
                let holder_signer = self.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
                let initial_commitment_tx = self.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
@@ -2548,7 +2772,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
                        let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
                        // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
-                       if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.get_counterparty_pubkeys().funding_pubkey) {
+                       if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
                                return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
                        }
                }
@@ -2557,7 +2781,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        initial_commitment_tx,
                        msg.signature,
                        Vec::new(),
-                       &self.get_holder_pubkeys().funding_pubkey,
+                       &self.context.get_holder_pubkeys().funding_pubkey,
                        self.counterparty_funding_pubkey()
                );
 
@@ -2566,14 +2790,14 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
 
                let funding_redeemscript = self.get_funding_redeemscript();
-               let funding_txo = self.get_funding_txo().unwrap();
+               let funding_txo = self.context.get_funding_txo().unwrap();
                let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
-               let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
+               let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
                let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
                let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
                monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
                let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
-                                                         shutdown_script, self.get_holder_selected_contest_delay(),
+                                                         shutdown_script, self.context.get_holder_selected_contest_delay(),
                                                          &self.context.destination_script, (funding_txo, funding_txo_script),
                                                          &self.context.channel_transaction_parameters,
                                                          funding_redeemscript.clone(), self.context.channel_value_satoshis,
@@ -2587,7 +2811,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                self.context.cur_holder_commitment_transaction_number -= 1;
                self.context.cur_counterparty_commitment_transaction_number -= 1;
 
-               log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.channel_id()));
+               log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.context.channel_id()));
 
                let need_channel_ready = self.check_get_channel_ready(0).is_some();
                self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
@@ -2661,7 +2885,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
                self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
 
-               log_info!(logger, "Received channel_ready from peer for channel {}", log_bytes!(self.channel_id()));
+               log_info!(logger, "Received channel_ready from peer for channel {}", log_bytes!(self.context.channel_id()));
 
                Ok(self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger))
        }
@@ -2849,17 +3073,17 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                         self.context.holder_dust_limit_satoshis       + dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000)
                };
                let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
-               if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > self.get_max_dust_htlc_exposure_msat() as i64 {
+               if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > self.context.get_max_dust_htlc_exposure_msat() as i64 {
                        remaining_msat_below_dust_exposure_limit =
-                               Some(self.get_max_dust_htlc_exposure_msat().saturating_sub(on_counterparty_dust_htlc_exposure_msat));
+                               Some(self.context.get_max_dust_htlc_exposure_msat().saturating_sub(on_counterparty_dust_htlc_exposure_msat));
                        dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
                }
 
                let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
-               if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > self.get_max_dust_htlc_exposure_msat() as i64 {
+               if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > self.context.get_max_dust_htlc_exposure_msat() as i64 {
                        remaining_msat_below_dust_exposure_limit = Some(cmp::min(
                                remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
-                               self.get_max_dust_htlc_exposure_msat().saturating_sub(on_holder_dust_htlc_exposure_msat)));
+                               self.context.get_max_dust_htlc_exposure_msat().saturating_sub(on_holder_dust_htlc_exposure_msat)));
                        dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
                }
 
@@ -3166,9 +3390,9 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
                if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
                        let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
-                       if on_counterparty_tx_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() {
+                       if on_counterparty_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() {
                                log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
-                                       on_counterparty_tx_dust_htlc_exposure_msat, self.get_max_dust_htlc_exposure_msat());
+                                       on_counterparty_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat());
                                pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
                        }
                }
@@ -3176,9 +3400,9 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
                if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
                        let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
-                       if on_holder_tx_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() {
+                       if on_holder_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() {
                                log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
-                                       on_holder_tx_dust_htlc_exposure_msat, self.get_max_dust_htlc_exposure_msat());
+                                       on_holder_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat());
                                pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
                        }
                }
@@ -3219,7 +3443,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        if pending_remote_value_msat - msg.amount_msat - self.context.holder_selected_channel_reserve_satoshis * 1000 < remote_fee_cost_incl_stuck_buffer_msat {
                                // Note that if the pending_forward_status is not updated here, then it's because we're already failing
                                // the HTLC, i.e. its status is already set to failing.
-                               log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", log_bytes!(self.channel_id()));
+                               log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", log_bytes!(self.context.channel_id()));
                                pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
                        }
                } else {
@@ -3347,7 +3571,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
                                log_bytes!(msg.signature.serialize_compact()[..]),
                                log_bytes!(self.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
-                               log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.channel_id()));
+                               log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id()));
                        if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.counterparty_funding_pubkey()) {
                                return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
                        }
@@ -3409,7 +3633,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
                        if let Some(_) = htlc.transaction_output_index {
                                let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
-                                       self.get_counterparty_selected_contest_delay().unwrap(), &htlc, self.context.opt_anchors(),
+                                       self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, self.context.opt_anchors(),
                                        false, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
 
                                let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, self.context.opt_anchors(), &keys);
@@ -3417,7 +3641,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
                                log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
                                        log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()),
-                                       encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.channel_id()));
+                                       encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.context.channel_id()));
                                if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
                                        return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
                                }
@@ -3439,7 +3663,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        commitment_stats.tx,
                        msg.signature,
                        msg.htlc_signatures.clone(),
-                       &self.get_holder_pubkeys().funding_pubkey,
+                       &self.context.get_holder_pubkeys().funding_pubkey,
                        self.counterparty_funding_pubkey()
                );
 
@@ -3537,7 +3761,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                } else { false };
 
                log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
-                       log_bytes!(self.channel_id()), if need_commitment_signed { " our own commitment_signed and" } else { "" });
+                       log_bytes!(self.context.channel_id()), if need_commitment_signed { " our own commitment_signed and" } else { "" });
                self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
                return Ok(self.push_ret_blockable_mon_update(monitor_update));
        }
@@ -3558,7 +3782,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
                if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
                        log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
-                               if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, log_bytes!(self.channel_id()));
+                               if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, log_bytes!(self.context.channel_id()));
 
                        let mut monitor_update = ChannelMonitorUpdate {
                                update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
@@ -3585,7 +3809,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                                                match e {
                                                                        ChannelError::Ignore(ref msg) => {
                                                                                log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}",
-                                                                                       log_bytes!(payment_hash.0), msg, log_bytes!(self.channel_id()));
+                                                                                       log_bytes!(payment_hash.0), msg, log_bytes!(self.context.channel_id()));
                                                                                // If we fail to send here, then this HTLC should
                                                                                // be failed backwards. Failing to send here
                                                                                // indicates that this HTLC may keep being put back
@@ -3650,7 +3874,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        monitor_update.updates.append(&mut additional_update.updates);
 
                        log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
-                               log_bytes!(self.channel_id()), if update_fee.is_some() { "a fee update, " } else { "" },
+                               log_bytes!(self.context.channel_id()), if update_fee.is_some() { "a fee update, " } else { "" },
                                update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len());
 
                        self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
@@ -3733,7 +3957,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
                }
 
-               log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", log_bytes!(self.channel_id()));
+               log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", log_bytes!(self.context.channel_id()));
                let mut to_forward_infos = Vec::new();
                let mut revoked_htlcs = Vec::new();
                let mut finalized_claimed_htlcs = Vec::new();
@@ -3862,7 +4086,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        self.context.monitor_pending_forwards.append(&mut to_forward_infos);
                        self.context.monitor_pending_failures.append(&mut revoked_htlcs);
                        self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
-                       log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.channel_id()));
+                       log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.context.channel_id()));
                        return Ok((Vec::new(), self.push_ret_blockable_mon_update(monitor_update)));
                }
 
@@ -3887,11 +4111,11 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                        monitor_update.updates.append(&mut additional_update.updates);
 
                                        log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.",
-                                               log_bytes!(self.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len());
+                                               log_bytes!(self.context.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len());
                                        self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
                                        Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
                                } else {
-                                       log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.channel_id()));
+                                       log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.context.channel_id()));
                                        self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
                                        Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
                                }
@@ -3941,11 +4165,11 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                // Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
                let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
                let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
-               if holder_tx_dust_exposure > self.get_max_dust_htlc_exposure_msat() {
+               if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
                        log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
                        return None;
                }
-               if counterparty_tx_dust_exposure > self.get_max_dust_htlc_exposure_msat() {
+               if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
                        log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
                        return None;
                }
@@ -4043,7 +4267,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                self.context.sent_message_awaiting_response = None;
 
                self.context.channel_state |= ChannelState::PeerDisconnected as u32;
-               log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.channel_id()));
+               log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.context.channel_id()));
        }
 
        /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
@@ -4116,7 +4340,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        self.context.monitor_pending_channel_ready = false;
                        let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
                        Some(msgs::ChannelReady {
-                               channel_id: self.channel_id(),
+                               channel_id: self.context.channel_id(),
                                next_per_commitment_point,
                                short_channel_id_alias: Some(self.context.outbound_scid_alias),
                        })
@@ -4152,7 +4376,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                self.context.monitor_pending_commitment_signed = false;
                let order = self.context.resend_order.clone();
                log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
-                       log_bytes!(self.channel_id()), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
+                       log_bytes!(self.context.channel_id()), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
                        if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
                        match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
                MonitorRestoreUpdates {
@@ -4182,11 +4406,11 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        let outbound_stats = self.get_outbound_pending_htlc_stats(None);
                        let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
                        let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
-                       if holder_tx_dust_exposure > self.get_max_dust_htlc_exposure_msat() {
+                       if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
                                return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
                                        msg.feerate_per_kw, holder_tx_dust_exposure)));
                        }
-                       if counterparty_tx_dust_exposure > self.get_max_dust_htlc_exposure_msat() {
+                       if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
                                return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
                                        msg.feerate_per_kw, counterparty_tx_dust_exposure)));
                        }
@@ -4215,7 +4439,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                for htlc in self.context.pending_outbound_htlcs.iter() {
                        if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
                                update_add_htlcs.push(msgs::UpdateAddHTLC {
-                                       channel_id: self.channel_id(),
+                                       channel_id: self.context.channel_id(),
                                        htlc_id: htlc.htlc_id,
                                        amount_msat: htlc.amount_msat,
                                        payment_hash: htlc.payment_hash,
@@ -4230,14 +4454,14 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                match reason {
                                        &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
                                                update_fail_htlcs.push(msgs::UpdateFailHTLC {
-                                                       channel_id: self.channel_id(),
+                                                       channel_id: self.context.channel_id(),
                                                        htlc_id: htlc.htlc_id,
                                                        reason: err_packet.clone()
                                                });
                                        },
                                        &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
                                                update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
-                                                       channel_id: self.channel_id(),
+                                                       channel_id: self.context.channel_id(),
                                                        htlc_id: htlc.htlc_id,
                                                        sha256_of_onion: sha256_of_onion.clone(),
                                                        failure_code: failure_code.clone(),
@@ -4245,7 +4469,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                        },
                                        &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
                                                update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
-                                                       channel_id: self.channel_id(),
+                                                       channel_id: self.context.channel_id(),
                                                        htlc_id: htlc.htlc_id,
                                                        payment_preimage: payment_preimage.clone(),
                                                });
@@ -4256,13 +4480,13 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
                let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
                        Some(msgs::UpdateFee {
-                               channel_id: self.channel_id(),
+                               channel_id: self.context.channel_id(),
                                feerate_per_kw: self.context.pending_update_fee.unwrap().0,
                        })
                } else { None };
 
                log_trace!(logger, "Regenerated latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
-                               log_bytes!(self.channel_id()), if update_fee.is_some() { " update_fee," } else { "" },
+                               log_bytes!(self.context.channel_id()), if update_fee.is_some() { " update_fee," } else { "" },
                                update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
                msgs::CommitmentUpdate {
                        update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
@@ -4366,7 +4590,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
                        return Ok(ReestablishResponses {
                                channel_ready: Some(msgs::ChannelReady {
-                                       channel_id: self.channel_id(),
+                                       channel_id: self.context.channel_id(),
                                        next_per_commitment_point,
                                        short_channel_id_alias: Some(self.context.outbound_scid_alias),
                                }),
@@ -4405,7 +4629,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
                        let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
                        Some(msgs::ChannelReady {
-                               channel_id: self.channel_id(),
+                               channel_id: self.context.channel_id(),
                                next_per_commitment_point,
                                short_channel_id_alias: Some(self.context.outbound_scid_alias),
                        })
@@ -4413,9 +4637,9 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
                if msg.next_local_commitment_number == next_counterparty_commitment_number {
                        if required_revoke.is_some() {
-                               log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", log_bytes!(self.channel_id()));
+                               log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", log_bytes!(self.context.channel_id()));
                        } else {
-                               log_debug!(logger, "Reconnected channel {} with no loss", log_bytes!(self.channel_id()));
+                               log_debug!(logger, "Reconnected channel {} with no loss", log_bytes!(self.context.channel_id()));
                        }
 
                        Ok(ReestablishResponses {
@@ -4426,9 +4650,9 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        })
                } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
                        if required_revoke.is_some() {
-                               log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", log_bytes!(self.channel_id()));
+                               log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", log_bytes!(self.context.channel_id()));
                        } else {
-                               log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", log_bytes!(self.channel_id()));
+                               log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", log_bytes!(self.context.channel_id()));
                        }
 
                        if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
@@ -4695,7 +4919,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
                tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
 
-               let funding_key = self.get_holder_pubkeys().funding_pubkey.serialize();
+               let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
                let counterparty_funding_key = self.counterparty_funding_pubkey().serialize();
                let mut holder_sig = sig.serialize_der().to_vec();
                holder_sig.push(EcdsaSighashType::All as u8);
@@ -4747,7 +4971,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                }
                let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
 
-               match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.get_counterparty_pubkeys().funding_pubkey) {
+               match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
                        Ok(_) => {},
                        Err(_e) => {
                                // The remote end may have decided to revoke their output due to inconsistent dust
@@ -4865,224 +5089,6 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                }
        }
 
-       // Public utilities:
-
-       pub fn channel_id(&self) -> [u8; 32] {
-               self.context.channel_id
-       }
-
-       // Return the `temporary_channel_id` used during channel establishment.
-       //
-       // Will return `None` for channels created prior to LDK version 0.0.115.
-       pub fn temporary_channel_id(&self) -> Option<[u8; 32]> {
-               self.context.temporary_channel_id
-       }
-
-       pub fn minimum_depth(&self) -> Option<u32> {
-               self.context.minimum_depth
-       }
-
-       /// Gets the "user_id" value passed into the construction of this channel. It has no special
-       /// meaning and exists only to allow users to have a persistent identifier of a channel.
-       pub fn get_user_id(&self) -> u128 {
-               self.context.user_id
-       }
-
-       /// Gets the channel's type
-       pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
-               &self.context.channel_type
-       }
-
-       /// Guaranteed to be Some after both ChannelReady messages have been exchanged (and, thus,
-       /// is_usable() returns true).
-       /// Allowed in any state (including after shutdown)
-       pub fn get_short_channel_id(&self) -> Option<u64> {
-               self.context.short_channel_id
-       }
-
-       /// Allowed in any state (including after shutdown)
-       pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
-               self.context.latest_inbound_scid_alias
-       }
-
-       /// Allowed in any state (including after shutdown)
-       pub fn outbound_scid_alias(&self) -> u64 {
-               self.context.outbound_scid_alias
-       }
-
-       /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
-       /// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases.
-       pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
-               assert_eq!(self.context.outbound_scid_alias, 0);
-               self.context.outbound_scid_alias = outbound_scid_alias;
-       }
-
-       /// Returns the funding_txo we either got from our peer, or were given by
-       /// get_outbound_funding_created.
-       pub fn get_funding_txo(&self) -> Option<OutPoint> {
-               self.context.channel_transaction_parameters.funding_outpoint
-       }
-
-       /// Returns the block hash in which our funding transaction was confirmed.
-       pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
-               self.context.funding_tx_confirmed_in
-       }
-
-       /// Returns the current number of confirmations on the funding transaction.
-       pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
-               if self.context.funding_tx_confirmation_height == 0 {
-                       // We either haven't seen any confirmation yet, or observed a reorg.
-                       return 0;
-               }
-
-               height.checked_sub(self.context.funding_tx_confirmation_height).map_or(0, |c| c + 1)
-       }
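As a worked example of the confirmation arithmetic being removed here, a self-contained sketch; the free function and the sample heights are ours, not part of the commit:

fn confirmations(funding_conf_height: u32, chain_height: u32) -> u32 {
    if funding_conf_height == 0 {
        // Unconfirmed, or a reorg cleared the recorded height.
        return 0;
    }
    // A transaction confirmed at the current tip has one confirmation.
    chain_height.checked_sub(funding_conf_height).map_or(0, |c| c + 1)
}

fn main() {
    assert_eq!(confirmations(0, 105), 0);   // not yet confirmed
    assert_eq!(confirmations(100, 100), 1); // confirmed at the tip
    assert_eq!(confirmations(100, 105), 6); // buried five blocks deep
    assert_eq!(confirmations(106, 105), 0); // tip behind the conf height (reorg race)
}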
-
-       fn get_holder_selected_contest_delay(&self) -> u16 {
-               self.context.channel_transaction_parameters.holder_selected_contest_delay
-       }
-
-       fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
-               &self.context.channel_transaction_parameters.holder_pubkeys
-       }
-
-       pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
-               self.context.channel_transaction_parameters.counterparty_parameters
-                       .as_ref().map(|params| params.selected_contest_delay)
-       }
-
-       fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
-               &self.context.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
-       }
-
-       /// Allowed in any state (including after shutdown)
-       pub fn get_counterparty_node_id(&self) -> PublicKey {
-               self.context.counterparty_node_id
-       }
-
-       /// Allowed in any state (including after shutdown)
-       pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
-               self.context.holder_htlc_minimum_msat
-       }
-
-       /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
-       pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
-               self.get_htlc_maximum_msat(self.context.holder_max_htlc_value_in_flight_msat)
-       }
-
-       /// Allowed in any state (including after shutdown)
-       pub fn get_announced_htlc_max_msat(&self) -> u64 {
-               return cmp::min(
-                       // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
-                       // to use full capacity. This is an effort to reduce routing failures, because in many cases
-                       // channel might have been used to route very small values (either by honest users or as DoS).
-                       self.context.channel_value_satoshis * 1000 * 9 / 10,
-
-                       self.context.counterparty_max_htlc_value_in_flight_msat
-               );
-       }
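The 9/10 capacity cap above is easy to sanity-check with concrete numbers; this sketch mirrors the expression with hypothetical values and a function name of our choosing:

fn announced_htlc_max_msat(channel_value_sat: u64, counterparty_max_in_flight_msat: u64) -> u64 {
    // Advertise at most 90% of capacity (in msat), further capped by the
    // counterparty's max-in-flight limit.
    core::cmp::min(channel_value_sat * 1000 * 9 / 10, counterparty_max_in_flight_msat)
}

fn main() {
    // 1_000_000 sat of capacity is 1_000_000_000 msat; 90% is 900_000_000 msat.
    assert_eq!(announced_htlc_max_msat(1_000_000, u64::MAX), 900_000_000);
    // The counterparty's limit binds first when it is smaller:
    assert_eq!(announced_htlc_max_msat(1_000_000, 250_000_000), 250_000_000);
}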
-
-       /// Allowed in any state (including after shutdown)
-       pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
-               self.context.counterparty_htlc_minimum_msat
-       }
-
-       /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
-       pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
-               self.get_htlc_maximum_msat(self.context.counterparty_max_htlc_value_in_flight_msat)
-       }
-
-       fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
-               self.context.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
-                       let holder_reserve = self.context.holder_selected_channel_reserve_satoshis;
-                       cmp::min(
-                               (self.context.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
-                               party_max_htlc_value_in_flight_msat
-                       )
-               })
-       }
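For the reserve arithmetic in get_htlc_maximum_msat, a simplified sketch; the real method reads these values from ChannelContext fields, and the sample amounts below are invented:

fn htlc_maximum_msat(
    channel_value_sat: u64, counterparty_reserve_sat: Option<u64>,
    holder_reserve_sat: u64, party_max_in_flight_msat: u64,
) -> Option<u64> {
    // Both sides' reserves come off the capacity before capping at the
    // party's max-in-flight limit.
    counterparty_reserve_sat.map(|cp_reserve| core::cmp::min(
        (channel_value_sat - cp_reserve - holder_reserve_sat) * 1000,
        party_max_in_flight_msat,
    ))
}

fn main() {
    // 1_000_000 sat channel, 10_000 sat reserve on each side:
    assert_eq!(htlc_maximum_msat(1_000_000, Some(10_000), 10_000, u64::MAX),
        Some(980_000_000));
    // Before the counterparty's reserve is known, no maximum is reported:
    assert_eq!(htlc_maximum_msat(1_000_000, None, 10_000, u64::MAX), None);
}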
-
-       pub fn get_value_satoshis(&self) -> u64 {
-               self.context.channel_value_satoshis
-       }
-
-       pub fn get_fee_proportional_millionths(&self) -> u32 {
-               self.context.config.options.forwarding_fee_proportional_millionths
-       }
-
-       pub fn get_cltv_expiry_delta(&self) -> u16 {
-               cmp::max(self.context.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
-       }
-
-       pub fn get_max_dust_htlc_exposure_msat(&self) -> u64 {
-               self.context.config.options.max_dust_htlc_exposure_msat
-       }
-
-       /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
-       pub fn prev_config(&self) -> Option<ChannelConfig> {
-               self.context.prev_config.map(|prev_config| prev_config.0)
-       }
-
-       // Checks whether we should emit a `ChannelPending` event.
-       pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
-               self.is_funding_initiated() && !self.context.channel_pending_event_emitted
-       }
-
-       // Returns whether we already emitted a `ChannelPending` event.
-       pub(crate) fn channel_pending_event_emitted(&self) -> bool {
-               self.context.channel_pending_event_emitted
-       }
-
-       // Remembers that we already emitted a `ChannelPending` event.
-       pub(crate) fn set_channel_pending_event_emitted(&mut self) {
-               self.context.channel_pending_event_emitted = true;
-       }
-
-       // Checks whether we should emit a `ChannelReady` event.
-       pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
-               self.context.is_usable() && !self.context.channel_ready_event_emitted
-       }
-
-       // Remembers that we already emitted a `ChannelReady` event.
-       pub(crate) fn set_channel_ready_event_emitted(&mut self) {
-               self.context.channel_ready_event_emitted = true;
-       }
-
-       /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
-       /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
-       /// no longer be considered when forwarding HTLCs.
-       pub fn maybe_expire_prev_config(&mut self) {
-               if self.context.prev_config.is_none() {
-                       return;
-               }
-               let prev_config = self.context.prev_config.as_mut().unwrap();
-               prev_config.1 += 1;
-               if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
-                       self.context.prev_config = None;
-               }
-       }
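The tick-based expiry above pairs with update_config below, which stashes the old options with a fresh counter. A sketch of the latch, mirroring the method's structure; EXPIRE_PREV_CONFIG_TICKS is a real constant in channel.rs, but the value 5 and the u32 stand-in for ChannelConfig are assumptions of this example:

const EXPIRE_PREV_CONFIG_TICKS: usize = 5;

struct Ctx { prev_config: Option<(u32, usize)> }

impl Ctx {
    fn maybe_expire_prev_config(&mut self) {
        if self.prev_config.is_none() {
            return;
        }
        let prev_config = self.prev_config.as_mut().unwrap();
        prev_config.1 += 1;
        if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
            self.prev_config = None;
        }
    }
}

fn main() {
    // update_config() would have stored the old options with a 0 counter:
    let mut ctx = Ctx { prev_config: Some((0, 0)) };
    for _ in 0..EXPIRE_PREV_CONFIG_TICKS { ctx.maybe_expire_prev_config(); }
    assert!(ctx.prev_config.is_none()); // expired after the configured ticks
}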
-
-       /// Returns the current [`ChannelConfig`] applied to the channel.
-       pub fn config(&self) -> ChannelConfig {
-               self.context.config.options
-       }
-
-       /// Updates the channel's config. A bool is returned indicating whether the config update
-       /// applied resulted in a new ChannelUpdate message.
-       pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
-               let did_channel_update =
-                       self.context.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
-                       self.context.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
-                       self.context.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
-               if did_channel_update {
-                       self.context.prev_config = Some((self.context.config.options, 0));
-                       // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
-                       // policy change to propagate throughout the network.
-                       self.context.update_time_counter += 1;
-               }
-               self.context.config.options = *config;
-               did_channel_update
-       }
-
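Taken together, this hunk deletes the Channel-level copies of these accessors; call sites now reach them through the context field instead. A minimal, self-contained sketch of that delegation shape (the field and method names match the commit, everything else is simplified):

struct ChannelContext { channel_id: [u8; 32] }

impl ChannelContext {
    fn channel_id(&self) -> [u8; 32] { self.channel_id }
}

struct Channel { context: ChannelContext }

fn main() {
    let chan = Channel { context: ChannelContext { channel_id: [0x11; 32] } };
    // Before this commit: chan.channel_id(); after it, via the context:
    assert_eq!(chan.context.channel_id(), [0x11; 32]);
}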
        fn internal_htlc_satisfies_config(
                &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
        ) -> Result<(), (&'static str, u16)> {
@@ -5110,9 +5116,9 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        pub fn htlc_satisfies_config(
                &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
        ) -> Result<(), (&'static str, u16)> {
-               self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.config())
+               self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
                        .or_else(|err| {
-                               if let Some(prev_config) = self.prev_config() {
+                               if let Some(prev_config) = self.context.prev_config() {
                                        self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
                                } else {
                                        Err(err)
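The or_else fallback above is the behavioral core of this hunk: an HTLC that fails the current config is re-checked against a not-yet-expired previous config. A simplified sketch, where u32 stands in for ChannelConfig and the fee check is invented for illustration:

fn check(cfg_min_fee: u32, offered_fee: u32) -> Result<(), &'static str> {
    if offered_fee >= cfg_min_fee { Ok(()) } else { Err("fee insufficient") }
}

fn satisfies(current: u32, prev: Option<u32>, offered_fee: u32) -> Result<(), &'static str> {
    check(current, offered_fee).or_else(|err| {
        if let Some(prev_cfg) = prev {
            // HTLCs routed under our previous policy remain acceptable
            // until the previous config expires.
            check(prev_cfg, offered_fee)
        } else {
            Err(err)
        }
    })
}

fn main() {
    assert!(satisfies(10, Some(5), 7).is_ok()); // passes only under the old policy
    assert!(satisfies(10, None, 7).is_err());
}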
@@ -5250,11 +5256,6 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        .filter_map(|upd| if upd.blocked { None } else { Some(&upd.update) })
        }
 
-       /// Returns true if funding_created was sent/received.
-       pub fn is_funding_initiated(&self) -> bool {
-               self.context.channel_state >= ChannelState::FundingSent as u32
-       }
-
        /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
        /// If the channel is outbound, this implies we have not yet broadcasted the funding
        /// transaction. If the channel is inbound, this implies simply that the channel has not
@@ -5397,7 +5398,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                NS::Target: NodeSigner,
                L::Target: Logger
        {
-               if let Some(funding_txo) = self.get_funding_txo() {
+               if let Some(funding_txo) = self.context.get_funding_txo() {
                        for &(index_in_block, tx) in txdata.iter() {
                                // Check if the transaction is the expected funding transaction, and if it is,
                                // check that it pays the right amount to the right script.
@@ -5448,7 +5449,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                }
                                for inp in tx.input.iter() {
                                        if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
-                                               log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(self.channel_id()));
+                                               log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(self.context.channel_id()));
                                                return Err(ClosureReason::CommitmentTxConfirmed);
                                        }
                                }
@@ -5597,7 +5598,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                }
 
                let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
-               let keys = self.get_holder_pubkeys();
+               let keys = self.context.get_holder_pubkeys();
 
                msgs::OpenChannel {
                        chain_hash,
@@ -5609,7 +5610,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
                        htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
                        feerate_per_kw: self.context.feerate_per_kw as u32,
-                       to_self_delay: self.get_holder_selected_contest_delay(),
+                       to_self_delay: self.context.get_holder_selected_contest_delay(),
                        max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
                        funding_pubkey: keys.funding_pubkey,
                        revocation_basepoint: keys.revocation_basepoint,
@@ -5667,7 +5668,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
        fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
                let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
-               let keys = self.get_holder_pubkeys();
+               let keys = self.context.get_holder_pubkeys();
 
                msgs::AcceptChannel {
                        temporary_channel_id: self.context.channel_id,
@@ -5676,7 +5677,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
                        htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
                        minimum_depth: self.context.minimum_depth.unwrap(),
-                       to_self_delay: self.get_holder_selected_contest_delay(),
+                       to_self_delay: self.context.get_holder_selected_contest_delay(),
                        max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
                        funding_pubkey: keys.funding_pubkey,
                        revocation_basepoint: keys.revocation_basepoint,
@@ -5784,17 +5785,17 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
                let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
                        .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
-               let counterparty_node_id = NodeId::from_pubkey(&self.get_counterparty_node_id());
+               let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
                let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
 
                let msg = msgs::UnsignedChannelAnnouncement {
                        features: channelmanager::provided_channel_features(&user_config),
                        chain_hash,
-                       short_channel_id: self.get_short_channel_id().unwrap(),
+                       short_channel_id: self.context.get_short_channel_id().unwrap(),
                        node_id_1: if were_node_one { node_id } else { counterparty_node_id },
                        node_id_2: if were_node_one { counterparty_node_id } else { node_id },
-                       bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.get_holder_pubkeys().funding_pubkey } else { self.counterparty_funding_pubkey() }),
-                       bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.counterparty_funding_pubkey() } else { &self.get_holder_pubkeys().funding_pubkey }),
+                       bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.counterparty_funding_pubkey() }),
+                       bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
                        excess_data: Vec::new(),
                };
 
@@ -5826,7 +5827,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        return None;
                }
 
-               log_trace!(logger, "Creating an announcement_signatures message for channel {}", log_bytes!(self.channel_id()));
+               log_trace!(logger, "Creating an announcement_signatures message for channel {}", log_bytes!(self.context.channel_id()));
                let announcement = match self.get_channel_announcement(node_signer, genesis_block_hash, user_config) {
                        Ok(a) => a,
                        Err(e) => {
@@ -5851,8 +5852,8 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
 
                Some(msgs::AnnouncementSignatures {
-                       channel_id: self.channel_id(),
-                       short_channel_id: self.get_short_channel_id().unwrap(),
+                       channel_id: self.context.channel_id(),
+                       short_channel_id: self.context.get_short_channel_id().unwrap(),
                        node_signature: our_node_sig,
                        bitcoin_signature: our_bitcoin_sig,
                })
@@ -5895,10 +5896,10 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
                let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
 
-               if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.get_counterparty_node_id()).is_err() {
+               if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
                        return Err(ChannelError::Close(format!(
                                "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
-                                &announcement, self.get_counterparty_node_id())));
+                                &announcement, self.context.get_counterparty_node_id())));
                }
                if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.counterparty_funding_pubkey()).is_err() {
                        return Err(ChannelError::Close(format!(
@@ -5948,15 +5949,15 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
                let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
                        let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
-                       log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), log_bytes!(self.channel_id()));
+                       log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), log_bytes!(self.context.channel_id()));
                        remote_last_secret
                } else {
-                       log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", log_bytes!(self.channel_id()));
+                       log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", log_bytes!(self.context.channel_id()));
                        [0;32]
                };
                self.mark_awaiting_response();
                msgs::ChannelReestablish {
-                       channel_id: self.channel_id(),
+                       channel_id: self.context.channel_id(),
                        // The protocol has two different commitment number concepts - the "commitment
                        // transaction number", which starts from 0 and counts up, and the "revocation key
                        // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
@@ -6208,14 +6209,14 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
                                encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
                                &counterparty_commitment_txid, encode::serialize_hex(&self.get_funding_redeemscript()),
-                               log_bytes!(signature.serialize_compact()[..]), log_bytes!(self.channel_id()));
+                               log_bytes!(signature.serialize_compact()[..]), log_bytes!(self.context.channel_id()));
 
                        for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
                                log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
-                                       encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.get_holder_selected_contest_delay(), htlc, self.context.opt_anchors(), false, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
+                                       encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, self.context.opt_anchors(), false, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
                                        encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, self.context.opt_anchors(), &counterparty_keys)),
                                        log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()),
-                                       log_bytes!(htlc_sig.serialize_compact()[..]), log_bytes!(self.channel_id()));
+                                       log_bytes!(htlc_sig.serialize_compact()[..]), log_bytes!(self.context.channel_id()));
                        }
                }
 
@@ -6386,7 +6387,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
                // return them to fail the payment.
                let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
-               let counterparty_node_id = self.get_counterparty_node_id();
+               let counterparty_node_id = self.context.get_counterparty_node_id();
                for htlc_update in self.context.holding_cell_htlc_updates.drain(..) {
                        match htlc_update {
                                HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
@@ -6395,7 +6396,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                _ => {}
                        }
                }
-               let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
+               let monitor_update = if let Some(funding_txo) = self.context.get_funding_txo() {
                        // If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
                        // returning a channel monitor update here would imply a channel monitor update before
                        // we even registered the channel monitor to begin with, which is invalid.
@@ -6405,7 +6406,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
                        if self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
                                self.context.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
-                               Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
+                               Some((self.context.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
                                        update_id: self.context.latest_monitor_update_id,
                                        updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
                                }))
@@ -7917,7 +7918,7 @@ mod tests {
 
                                        let ref htlc = htlcs[$htlc_idx];
                                        let htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
-                                               chan.get_counterparty_selected_contest_delay().unwrap(),
+                                               chan.context.get_counterparty_selected_contest_delay().unwrap(),
                                                &htlc, $opt_anchors, false, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
                                        let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
                                        let htlc_sighashtype = if $opt_anchors { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
lightning/src/ln/channelmanager.rs
index 75b17caead24991d597cd1907b931d45aac383a3..61fca301fa07237cbb521a618886421882ea8f28 100644 (file)
@@ -1473,9 +1473,9 @@ impl ChannelDetails {
                let (to_remote_reserve_satoshis, to_self_reserve_satoshis) =
                        channel.get_holder_counterparty_selected_channel_reserve_satoshis();
                ChannelDetails {
-                       channel_id: channel.channel_id(),
+                       channel_id: channel.context.channel_id(),
                        counterparty: ChannelCounterparty {
-                               node_id: channel.get_counterparty_node_id(),
+                               node_id: channel.context.get_counterparty_node_id(),
                                features: latest_features,
                                unspendable_punishment_reserve: to_remote_reserve_satoshis,
                                forwarding_info: channel.counterparty_forwarding_info(),
@@ -1485,17 +1485,17 @@ impl ChannelDetails {
                                // Else `Channel::get_counterparty_htlc_minimum_msat` could return the
                                // default `0` value set by `Channel::new_outbound`.
                                outbound_htlc_minimum_msat: if channel.context.have_received_message() {
-                                       Some(channel.get_counterparty_htlc_minimum_msat()) } else { None },
-                               outbound_htlc_maximum_msat: channel.get_counterparty_htlc_maximum_msat(),
+                                       Some(channel.context.get_counterparty_htlc_minimum_msat()) } else { None },
+                               outbound_htlc_maximum_msat: channel.context.get_counterparty_htlc_maximum_msat(),
                        },
-                       funding_txo: channel.get_funding_txo(),
+                       funding_txo: channel.context.get_funding_txo(),
                        // Note that accept_channel (or open_channel) is always the first message, so
                        // `have_received_message` indicates that type negotiation has completed.
-                       channel_type: if channel.context.have_received_message() { Some(channel.get_channel_type().clone()) } else { None },
-                       short_channel_id: channel.get_short_channel_id(),
-                       outbound_scid_alias: if channel.context.is_usable() { Some(channel.outbound_scid_alias()) } else { None },
-                       inbound_scid_alias: channel.latest_inbound_scid_alias(),
-                       channel_value_satoshis: channel.get_value_satoshis(),
+                       channel_type: if channel.context.have_received_message() { Some(channel.context.get_channel_type().clone()) } else { None },
+                       short_channel_id: channel.context.get_short_channel_id(),
+                       outbound_scid_alias: if channel.context.is_usable() { Some(channel.context.outbound_scid_alias()) } else { None },
+                       inbound_scid_alias: channel.context.latest_inbound_scid_alias(),
+                       channel_value_satoshis: channel.context.get_value_satoshis(),
                        feerate_sat_per_1000_weight: Some(channel.get_feerate_sat_per_1000_weight()),
                        unspendable_punishment_reserve: to_self_reserve_satoshis,
                        balance_msat: balance.balance_msat,
@@ -1503,17 +1503,17 @@ impl ChannelDetails {
                        outbound_capacity_msat: balance.outbound_capacity_msat,
                        next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
                        next_outbound_htlc_minimum_msat: balance.next_outbound_htlc_minimum_msat,
-                       user_channel_id: channel.get_user_id(),
-                       confirmations_required: channel.minimum_depth(),
-                       confirmations: Some(channel.get_funding_tx_confirmations(best_block_height)),
-                       force_close_spend_delay: channel.get_counterparty_selected_contest_delay(),
+                       user_channel_id: channel.context.get_user_id(),
+                       confirmations_required: channel.context.minimum_depth(),
+                       confirmations: Some(channel.context.get_funding_tx_confirmations(best_block_height)),
+                       force_close_spend_delay: channel.context.get_counterparty_selected_contest_delay(),
                        is_outbound: channel.context.is_outbound(),
                        is_channel_ready: channel.context.is_usable(),
                        is_usable: channel.context.is_live(),
                        is_public: channel.context.should_announce(),
-                       inbound_htlc_minimum_msat: Some(channel.get_holder_htlc_minimum_msat()),
-                       inbound_htlc_maximum_msat: channel.get_holder_htlc_maximum_msat(),
-                       config: Some(channel.config()),
+                       inbound_htlc_minimum_msat: Some(channel.context.get_holder_htlc_minimum_msat()),
+                       inbound_htlc_maximum_msat: channel.context.get_holder_htlc_maximum_msat(),
+                       config: Some(channel.context.config()),
                }
        }
 }
@@ -1615,9 +1615,9 @@ macro_rules! handle_error {
 
 macro_rules! update_maps_on_chan_removal {
        ($self: expr, $channel: expr) => {{
-               $self.id_to_peer.lock().unwrap().remove(&$channel.channel_id());
+               $self.id_to_peer.lock().unwrap().remove(&$channel.context.channel_id());
                let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
-               if let Some(short_id) = $channel.get_short_channel_id() {
+               if let Some(short_id) = $channel.context.get_short_channel_id() {
                        short_to_chan_info.remove(&short_id);
                } else {
                        // If the channel was never confirmed on-chain prior to its closure, remove the
@@ -1626,10 +1626,10 @@ macro_rules! update_maps_on_chan_removal {
                        // also don't want a counterparty to be able to trivially cause a memory leak by simply
                        // opening a million channels with us which are closed before we ever reach the funding
                        // stage.
-                       let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel.outbound_scid_alias());
+                       let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel.context.outbound_scid_alias());
                        debug_assert!(alias_removed);
                }
-               short_to_chan_info.remove(&$channel.outbound_scid_alias());
+               short_to_chan_info.remove(&$channel.context.outbound_scid_alias());
        }}
 }
 
@@ -1647,7 +1647,7 @@ macro_rules! convert_chan_err {
                                log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg);
                                update_maps_on_chan_removal!($self, $channel);
                                let shutdown_res = $channel.force_shutdown(true);
-                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(),
+                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.context.get_user_id(),
                                        shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
                        },
                }
@@ -1697,18 +1697,18 @@ macro_rules! remove_channel {
 macro_rules! send_channel_ready {
        ($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{
                $pending_msg_events.push(events::MessageSendEvent::SendChannelReady {
-                       node_id: $channel.get_counterparty_node_id(),
+                       node_id: $channel.context.get_counterparty_node_id(),
                        msg: $channel_ready_msg,
                });
                // Note that we may send a `channel_ready` multiple times for a channel if we reconnect, so
                // we allow collisions, but we shouldn't ever be updating the channel ID pointed to.
                let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
-               let outbound_alias_insert = short_to_chan_info.insert($channel.outbound_scid_alias(), ($channel.get_counterparty_node_id(), $channel.channel_id()));
-               assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()),
+               let outbound_alias_insert = short_to_chan_info.insert($channel.context.outbound_scid_alias(), ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()));
+               assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()),
                        "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
-               if let Some(real_scid) = $channel.get_short_channel_id() {
-                       let scid_insert = short_to_chan_info.insert(real_scid, ($channel.get_counterparty_node_id(), $channel.channel_id()));
-                       assert!(scid_insert.is_none() || scid_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()),
+               if let Some(real_scid) = $channel.context.get_short_channel_id() {
+                       let scid_insert = short_to_chan_info.insert(real_scid, ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()));
+                       assert!(scid_insert.is_none() || scid_insert.unwrap() == ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()),
                                "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
                }
        }}
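The assertions in this macro tolerate benign re-inserts (a channel_ready may be re-sent on reconnect) while still catching an alias that points at a different channel. A toy reproduction with a plain HashMap, where u8 stands in for the counterparty PublicKey:

use std::collections::HashMap;

fn main() {
    let mut short_to_chan_info: HashMap<u64, (u8, [u8; 32])> = HashMap::new();
    let entry = (1u8, [0x22; 32]);

    // First insert: vacant, so the previous value is None.
    let prev = short_to_chan_info.insert(42, entry);
    assert!(prev.is_none() || prev.unwrap() == entry, "SCIDs should never collide");

    // A re-send hits the same key with the same value, which is still fine:
    let prev = short_to_chan_info.insert(42, entry);
    assert!(prev.is_none() || prev.unwrap() == entry, "SCIDs should never collide");
}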
@@ -1716,30 +1716,30 @@ macro_rules! send_channel_ready {
 
 macro_rules! emit_channel_pending_event {
        ($locked_events: expr, $channel: expr) => {
-               if $channel.should_emit_channel_pending_event() {
+               if $channel.context.should_emit_channel_pending_event() {
                        $locked_events.push_back((events::Event::ChannelPending {
-                               channel_id: $channel.channel_id(),
-                               former_temporary_channel_id: $channel.temporary_channel_id(),
-                               counterparty_node_id: $channel.get_counterparty_node_id(),
-                               user_channel_id: $channel.get_user_id(),
-                               funding_txo: $channel.get_funding_txo().unwrap().into_bitcoin_outpoint(),
+                               channel_id: $channel.context.channel_id(),
+                               former_temporary_channel_id: $channel.context.temporary_channel_id(),
+                               counterparty_node_id: $channel.context.get_counterparty_node_id(),
+                               user_channel_id: $channel.context.get_user_id(),
+                               funding_txo: $channel.context.get_funding_txo().unwrap().into_bitcoin_outpoint(),
                        }, None));
-                       $channel.set_channel_pending_event_emitted();
+                       $channel.context.set_channel_pending_event_emitted();
                }
        }
 }
 
 macro_rules! emit_channel_ready_event {
        ($locked_events: expr, $channel: expr) => {
-               if $channel.should_emit_channel_ready_event() {
-                       debug_assert!($channel.channel_pending_event_emitted());
+               if $channel.context.should_emit_channel_ready_event() {
+                       debug_assert!($channel.context.channel_pending_event_emitted());
                        $locked_events.push_back((events::Event::ChannelReady {
-                               channel_id: $channel.channel_id(),
-                               user_channel_id: $channel.get_user_id(),
-                               counterparty_node_id: $channel.get_counterparty_node_id(),
-                               channel_type: $channel.get_channel_type().clone(),
+                               channel_id: $channel.context.channel_id(),
+                               user_channel_id: $channel.context.get_user_id(),
+                               counterparty_node_id: $channel.context.get_counterparty_node_id(),
+                               channel_type: $channel.context.get_channel_type().clone(),
                        }, None));
-                       $channel.set_channel_ready_event_emitted();
+                       $channel.context.set_channel_ready_event_emitted();
                }
        }
 }
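Both event macros follow the same emit-once latch: check a should-emit predicate, push the event, then set the emitted flag so redundant calls (for example after a reconnect) become no-ops. A self-contained sketch of that pattern, with simplified types:

struct Ctx { channel_pending_event_emitted: bool }

impl Ctx {
    fn should_emit(&self) -> bool { !self.channel_pending_event_emitted }
    fn set_emitted(&mut self) { self.channel_pending_event_emitted = true; }
}

fn main() {
    let mut ctx = Ctx { channel_pending_event_emitted: false };
    let mut events = 0;
    for _ in 0..3 {
        if ctx.should_emit() {
            events += 1;       // the event would be pushed here
            ctx.set_emitted(); // latch so later passes skip it
        }
    }
    assert_eq!(events, 1);
}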
@@ -1749,7 +1749,7 @@ macro_rules! handle_monitor_update_completion {
                let mut updates = $chan.monitor_updating_restored(&$self.logger,
                        &$self.node_signer, $self.genesis_hash, &$self.default_configuration,
                        $self.best_block.read().unwrap().height());
-               let counterparty_node_id = $chan.get_counterparty_node_id();
+               let counterparty_node_id = $chan.context.get_counterparty_node_id();
                let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() {
                        // We only send a channel_update in the case where we are just now sending a
                        // channel_ready and the channel is in a usable state. We may re-send a
@@ -1765,7 +1765,7 @@ macro_rules! handle_monitor_update_completion {
                } else { None };
 
                let update_actions = $peer_state.monitor_update_blocked_actions
-                       .remove(&$chan.channel_id()).unwrap_or(Vec::new());
+                       .remove(&$chan.context.channel_id()).unwrap_or(Vec::new());
 
                let htlc_forwards = $self.handle_channel_resumption(
                        &mut $peer_state.pending_msg_events, $chan, updates.raa,
@@ -1776,7 +1776,7 @@ macro_rules! handle_monitor_update_completion {
                        $peer_state.pending_msg_events.push(upd);
                }
 
-               let channel_id = $chan.channel_id();
+               let channel_id = $chan.context.channel_id();
                core::mem::drop($peer_state_lock);
                core::mem::drop($per_peer_state_lock);
 
@@ -1804,16 +1804,16 @@ macro_rules! handle_new_monitor_update {
                match $update_res {
                        ChannelMonitorUpdateStatus::InProgress => {
                                log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
-                                       log_bytes!($chan.channel_id()[..]));
+                                       log_bytes!($chan.context.channel_id()[..]));
                                Ok(())
                        },
                        ChannelMonitorUpdateStatus::PermanentFailure => {
                                log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure",
-                                       log_bytes!($chan.channel_id()[..]));
+                                       log_bytes!($chan.context.channel_id()[..]));
                                update_maps_on_chan_removal!($self, $chan);
                                let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown(
-                                       "ChannelMonitor storage failure".to_owned(), $chan.channel_id(),
-                                       $chan.get_user_id(), $chan.force_shutdown(false),
+                                       "ChannelMonitor storage failure".to_owned(), $chan.context.channel_id(),
+                                       $chan.context.get_user_id(), $chan.force_shutdown(false),
                                        $self.get_channel_update_for_broadcast(&$chan).ok()));
                                $remove;
                                res
@@ -2057,7 +2057,7 @@ where
                };
                let res = channel.get_open_channel(self.genesis_hash.clone());
 
-               let temporary_channel_id = channel.channel_id();
+               let temporary_channel_id = channel.context.channel_id();
                match peer_state.channel_by_id.entry(temporary_channel_id) {
                        hash_map::Entry::Occupied(_) => {
                                if cfg!(fuzzing) {
@@ -2171,14 +2171,14 @@ where
                match channel.unbroadcasted_funding() {
                        Some(transaction) => {
                                pending_events_lock.push_back((events::Event::DiscardFunding {
-                                       channel_id: channel.channel_id(), transaction
+                                       channel_id: channel.context.channel_id(), transaction
                                }, None));
                        },
                        None => {},
                }
                pending_events_lock.push_back((events::Event::ChannelClosed {
-                       channel_id: channel.channel_id(),
-                       user_channel_id: channel.get_user_id(),
+                       channel_id: channel.context.channel_id(),
+                       user_channel_id: channel.context.get_user_id(),
                        reason: closure_reason
                }, None));
        }
@@ -2197,7 +2197,7 @@ where
                        let peer_state = &mut *peer_state_lock;
                        match peer_state.channel_by_id.entry(channel_id.clone()) {
                                hash_map::Entry::Occupied(mut chan_entry) => {
-                                       let funding_txo_opt = chan_entry.get().get_funding_txo();
+                                       let funding_txo_opt = chan_entry.get().context.get_funding_txo();
                                        let their_features = &peer_state.latest_features;
                                        let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut()
                                                .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
@@ -2353,7 +2353,7 @@ where
                        });
                }
 
-               Ok(chan.get_counterparty_node_id())
+               Ok(chan.context.get_counterparty_node_id())
        }
 
        fn force_close_sending_error(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> {
@@ -2656,7 +2656,7 @@ where
                                                        // we don't allow forwards outbound over them.
                                                        break Some(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None));
                                                }
-                                               if chan.get_channel_type().supports_scid_privacy() && *short_channel_id != chan.outbound_scid_alias() {
+                                               if chan.context.get_channel_type().supports_scid_privacy() && *short_channel_id != chan.context.outbound_scid_alias() {
                                                        // `option_scid_alias` (referred to in LDK as `scid_privacy`) means
                                                        // "refuse to forward unless the SCID alias was used", so we pretend
                                                        // we don't have the channel here.
@@ -2679,7 +2679,7 @@ where
                                                                break Some(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt));
                                                        }
                                                }
-                                               if *outgoing_amt_msat < chan.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
+                                               if *outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
                                                        break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
                                                }
                                                if let Err((err, code)) = chan.htlc_satisfies_config(&msg, *outgoing_amt_msat, *outgoing_cltv_value) {
@@ -2771,10 +2771,10 @@ where
                                action: msgs::ErrorAction::IgnoreError
                        });
                }
-               if chan.get_short_channel_id().is_none() {
+               if chan.context.get_short_channel_id().is_none() {
                        return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError});
                }
-               log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.channel_id()));
+               log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.context.channel_id()));
                self.get_channel_update_for_unicast(chan)
        }
 
@@ -2790,8 +2790,8 @@ where
        /// [`channel_update`]: msgs::ChannelUpdate
        /// [`internal_closing_signed`]: Self::internal_closing_signed
        fn get_channel_update_for_unicast(&self, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
-               log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id()));
-               let short_channel_id = match chan.get_short_channel_id().or(chan.latest_inbound_scid_alias()) {
+               log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.context.channel_id()));
+               let short_channel_id = match chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) {
                        None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
                        Some(id) => id,
                };
@@ -2799,8 +2799,8 @@ where
                self.get_channel_update_for_onion(short_channel_id, chan)
        }
        fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
-               log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.channel_id()));
-               let were_node_one = self.our_network_pubkey.serialize()[..] < chan.get_counterparty_node_id().serialize()[..];
+               log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.context.channel_id()));
+               let were_node_one = self.our_network_pubkey.serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
 
                let enabled = chan.context.is_usable() && match chan.channel_update_status() {
                        ChannelUpdateStatus::Enabled => true,
@@ -2814,11 +2814,11 @@ where
                        short_channel_id,
                        timestamp: chan.context.get_update_time_counter(),
                        flags: (!were_node_one) as u8 | ((!enabled as u8) << 1),
-                       cltv_expiry_delta: chan.get_cltv_expiry_delta(),
-                       htlc_minimum_msat: chan.get_counterparty_htlc_minimum_msat(),
-                       htlc_maximum_msat: chan.get_announced_htlc_max_msat(),
+                       cltv_expiry_delta: chan.context.get_cltv_expiry_delta(),
+                       htlc_minimum_msat: chan.context.get_counterparty_htlc_minimum_msat(),
+                       htlc_maximum_msat: chan.context.get_announced_htlc_max_msat(),
                        fee_base_msat: chan.context.get_outbound_forwarding_fee_base_msat(),
-                       fee_proportional_millionths: chan.get_fee_proportional_millionths(),
+                       fee_proportional_millionths: chan.context.get_fee_proportional_millionths(),
                        excess_data: Vec::new(),
                };
                // Panic on failure to signal LDK should be restarted to retry signing the `ChannelUpdate`.
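
The flags byte built in this hunk packs the BOLT 7 channel_update bits: bit 0 carries the direction (0 when we are node_1 in the lexicographic pubkey ordering), bit 1 the disabled flag. A minimal standalone sketch of the same packing, plus the matching direction check applied later when handling a counterparty's channel_update; the function names are illustrative, not LDK API:

    // Sketch (not LDK code) of the BOLT 7 channel_update flag packing above:
    // bit 0 is the direction, bit 1 the disabled bit.
    fn pack_channel_flags(were_node_one: bool, enabled: bool) -> u8 {
        (!were_node_one) as u8 | ((!enabled as u8) << 1)
    }

    // Mirror of the `msg.contents.flags & 1 == 0` check seen further down:
    // bit 0 clear means the update came from node_1.
    fn is_from_node_one(flags: u8) -> bool {
        flags & 1 == 0
    }

    fn main() {
        assert_eq!(pack_channel_flags(true, true), 0b00);  // node_1, enabled
        assert_eq!(pack_channel_flags(false, true), 0b01); // node_2, enabled
        assert_eq!(pack_channel_flags(true, false), 0b10); // node_1, disabled
        assert!(is_from_node_one(pack_channel_flags(true, false)));
    }
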
@@ -2869,7 +2869,7 @@ where
                                if !chan.get().context.is_live() {
                                        return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()});
                                }
-                               let funding_txo = chan.get().get_funding_txo().unwrap();
+                               let funding_txo = chan.get().context.get_funding_txo().unwrap();
                                let send_res = chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(),
                                        htlc_cltv, HTLCSource::OutboundRoute {
                                                path: path.clone(),
@@ -3106,7 +3106,7 @@ where
 
                                let funding_res = chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
                                        .map_err(|e| if let ChannelError::Close(msg) = e {
-                                               MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None)
+                                               MsgHandleErrInternal::from_finish_shutdown(msg, chan.context.channel_id(), chan.context.get_user_id(), chan.force_shutdown(true), None)
                                        } else { unreachable!(); });
                                match funding_res {
                                        Ok(funding_msg) => (funding_msg, chan),
@@ -3114,7 +3114,7 @@ where
                                                mem::drop(peer_state_lock);
                                                mem::drop(per_peer_state);
 
-                                               let _ = handle_error!(self, funding_res, chan.get_counterparty_node_id());
+                                               let _ = handle_error!(self, funding_res, chan.context.get_counterparty_node_id());
                                                return Err(APIError::ChannelUnavailable {
                                                        err: "Signer refused to sign the initial commitment transaction".to_owned()
                                                });
@@ -3131,16 +3131,16 @@ where
                };
 
                peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
-                       node_id: chan.get_counterparty_node_id(),
+                       node_id: chan.context.get_counterparty_node_id(),
                        msg,
                });
-               match peer_state.channel_by_id.entry(chan.channel_id()) {
+               match peer_state.channel_by_id.entry(chan.context.channel_id()) {
                        hash_map::Entry::Occupied(_) => {
                                panic!("Generated duplicate funding txid?");
                        },
                        hash_map::Entry::Vacant(e) => {
                                let mut id_to_peer = self.id_to_peer.lock().unwrap();
-                               if id_to_peer.insert(chan.channel_id(), chan.get_counterparty_node_id()).is_some() {
+                               if id_to_peer.insert(chan.context.channel_id(), chan.context.get_counterparty_node_id()).is_some() {
                                        panic!("id_to_peer map already contained funding txid, which shouldn't be possible");
                                }
                                e.insert(chan);
@@ -3218,7 +3218,7 @@ where
                        let mut output_index = None;
                        let expected_spk = chan.get_funding_redeemscript().to_v0_p2wsh();
                        for (idx, outp) in tx.output.iter().enumerate() {
-                               if outp.script_pubkey == expected_spk && outp.value == chan.get_value_satoshis() {
+                               if outp.script_pubkey == expected_spk && outp.value == chan.context.get_value_satoshis() {
                                        if output_index.is_some() {
                                                return Err(APIError::APIMisuseError {
                                                        err: "Multiple outputs matched the expected script and value".to_owned()
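
The funding-output scan above insists that exactly one transaction output match both the expected funding script and the channel value, rejecting ambiguous transactions. A self-contained sketch of that scan, with an illustrative struct standing in for bitcoin::TxOut:

    // Sketch of the funding-output search above (illustrative types, not
    // bitcoin::TxOut): exactly one output may match script and value.
    struct TxOut { script_pubkey: Vec<u8>, value: u64 }

    fn find_funding_output(outputs: &[TxOut], expected_spk: &[u8], value_sats: u64)
        -> Result<usize, &'static str>
    {
        let mut output_index = None;
        for (idx, outp) in outputs.iter().enumerate() {
            if outp.script_pubkey.as_slice() == expected_spk && outp.value == value_sats {
                if output_index.is_some() {
                    return Err("Multiple outputs matched the expected script and value");
                }
                output_index = Some(idx);
            }
        }
        output_index.ok_or("No output matched the expected script and value")
    }
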
@@ -3282,16 +3282,16 @@ where
                }
                for channel_id in channel_ids {
                        let channel = peer_state.channel_by_id.get_mut(channel_id).unwrap();
-                       let mut config = channel.config();
+                       let mut config = channel.context.config();
                        config.apply(config_update);
-                       if !channel.update_config(&config) {
+                       if !channel.context.update_config(&config) {
                                continue;
                        }
                        if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
                                peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
                        } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
                                peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
-                                       node_id: channel.get_counterparty_node_id(),
+                                       node_id: channel.context.get_counterparty_node_id(),
                                        msg,
                                });
                        }
@@ -3365,7 +3365,7 @@ where
                                                        err: format!("Channel with id {} not fully established", log_bytes!(*next_hop_channel_id))
                                                })
                                        }
-                                       chan.get_short_channel_id().unwrap_or(chan.outbound_scid_alias())
+                                       chan.context.get_short_channel_id().unwrap_or(chan.context.outbound_scid_alias())
                                },
                                None => return Err(APIError::ChannelUnavailable {
                                        err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*next_hop_channel_id), next_node_id)
@@ -3591,7 +3591,7 @@ where
                                                                                        let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get());
                                                                                        failed_forwards.push((htlc_source, payment_hash,
                                                                                                HTLCFailReason::reason(failure_code, data),
-                                                                                               HTLCDestination::NextHopChannel { node_id: Some(chan.get().get_counterparty_node_id()), channel_id: forward_chan_id }
+                                                                                               HTLCDestination::NextHopChannel { node_id: Some(chan.get().context.get_counterparty_node_id()), channel_id: forward_chan_id }
                                                                                        ));
                                                                                        continue;
                                                                                }
@@ -4067,7 +4067,7 @@ where
                                                        _ => {},
                                                }
 
-                                               chan.maybe_expire_prev_config();
+                                               chan.context.maybe_expire_prev_config();
 
                                                if chan.should_disconnect_peer_awaiting_response() {
                                                        log_debug!(self.logger, "Disconnecting peer {} due to not making any progress on channel {}",
@@ -4228,9 +4228,9 @@ where
                // we're not leaking that we have a channel with the counterparty), otherwise we try to use
                // an inbound SCID alias before the real SCID.
                let scid_pref = if chan.context.should_announce() {
-                       chan.get_short_channel_id().or(chan.latest_inbound_scid_alias())
+                       chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias())
                } else {
-                       chan.latest_inbound_scid_alias().or(chan.get_short_channel_id())
+                       chan.context.latest_inbound_scid_alias().or(chan.context.get_short_channel_id())
                };
                if let Some(scid) = scid_pref {
                        self.get_htlc_temp_fail_err_and_data(desired_err_code, scid, chan)
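
As the comment in this hunk notes, failure messages for unannounced channels try the inbound SCID alias before the real SCID so the on-chain SCID is never leaked, while announced channels prefer the real SCID. A sketch of that selection, with an illustrative free-function signature in place of the ChannelContext accessors:

    // Sketch of the SCID preference above: announced channels lead with the
    // real short_channel_id, unannounced ones with the inbound alias.
    fn scid_for_failure(
        should_announce: bool,
        real_scid: Option<u64>,
        inbound_alias: Option<u64>,
    ) -> Option<u64> {
        if should_announce {
            real_scid.or(inbound_alias)
        } else {
            inbound_alias.or(real_scid)
        }
    }
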
@@ -4492,7 +4492,7 @@ where
                                let mut peer_state_lock = peer_state_opt.unwrap();
                                let peer_state = &mut *peer_state_lock;
                                if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) {
-                                       let counterparty_node_id = chan.get().get_counterparty_node_id();
+                                       let counterparty_node_id = chan.get().context.get_counterparty_node_id();
                                        let fulfill_res = chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger);
 
                                        if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res {
@@ -4618,7 +4618,7 @@ where
                channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
        -> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> {
                log_trace!(self.logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
-                       log_bytes!(channel.channel_id()),
+                       log_bytes!(channel.context.channel_id()),
                        if raa.is_some() { "an" } else { "no" },
                        if commitment_update.is_some() { "a" } else { "no" }, pending_forwards.len(),
                        if funding_broadcastable.is_some() { "" } else { "not " },
@@ -4627,10 +4627,10 @@ where
 
                let mut htlc_forwards = None;
 
-               let counterparty_node_id = channel.get_counterparty_node_id();
+               let counterparty_node_id = channel.context.get_counterparty_node_id();
                if !pending_forwards.is_empty() {
-                       htlc_forwards = Some((channel.get_short_channel_id().unwrap_or(channel.outbound_scid_alias()),
-                               channel.get_funding_txo().unwrap(), channel.get_user_id(), pending_forwards));
+                       htlc_forwards = Some((channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias()),
+                               channel.context.get_funding_txo().unwrap(), channel.context.get_user_id(), pending_forwards));
                }
 
                if let Some(msg) = channel_ready {
@@ -4778,9 +4778,9 @@ where
                                }
                                if accept_0conf {
                                        channel.get_mut().set_0conf();
-                               } else if channel.get().get_channel_type().requires_zero_conf() {
+                               } else if channel.get().context.get_channel_type().requires_zero_conf() {
                                        let send_msg_err_event = events::MessageSendEvent::HandleError {
-                                               node_id: channel.get().get_counterparty_node_id(),
+                                               node_id: channel.get().context.get_counterparty_node_id(),
                                                action: msgs::ErrorAction::SendErrorMessage{
                                                        msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), }
                                                }
@@ -4794,7 +4794,7 @@ where
                                        // channels per-peer we can accept channels from a peer with existing ones.
                                        if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
                                                let send_msg_err_event = events::MessageSendEvent::HandleError {
-                                                       node_id: channel.get().get_counterparty_node_id(),
+                                                       node_id: channel.get().context.get_counterparty_node_id(),
                                                        action: msgs::ErrorAction::SendErrorMessage{
                                                                msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
                                                        }
@@ -4806,7 +4806,7 @@ where
                                }
 
                                peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
-                                       node_id: channel.get().get_counterparty_node_id(),
+                                       node_id: channel.get().context.get_counterparty_node_id(),
                                        msg: channel.get_mut().accept_inbound_channel(user_channel_id),
                                });
                        }
@@ -4845,8 +4845,8 @@ where
        ) -> usize {
                let mut num_unfunded_channels = 0;
                for (_, chan) in peer.channel_by_id.iter() {
-                       if !chan.context.is_outbound() && chan.minimum_depth().unwrap_or(1) != 0 &&
-                               chan.get_funding_tx_confirmations(best_block_height) == 0
+                       if !chan.context.is_outbound() && chan.context.minimum_depth().unwrap_or(1) != 0 &&
+                               chan.context.get_funding_tx_confirmations(best_block_height) == 0
                        {
                                num_unfunded_channels += 1;
                        }
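
The helper above counts a peer's inbound channels that still await funding confirmation: outbound channels are skipped, and a channel whose minimum_depth is zero (0conf) is never considered unfunded. A standalone sketch with an illustrative struct in place of the real Channel:

    // Sketch of the unfunded-channel test above: an inbound channel counts as
    // unfunded if it still requires a confirmation depth and its funding
    // transaction has no confirmations yet. Illustrative struct, not LDK's.
    struct ChanInfo { is_outbound: bool, minimum_depth: Option<u32>, funding_confs: u32 }

    fn count_unfunded(chans: &[ChanInfo]) -> usize {
        chans.iter()
            .filter(|c| !c.is_outbound && c.minimum_depth.unwrap_or(1) != 0 && c.funding_confs == 0)
            .count()
    }
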
@@ -4911,14 +4911,14 @@ where
                        },
                        Ok(res) => res
                };
-               match peer_state.channel_by_id.entry(channel.channel_id()) {
+               match peer_state.channel_by_id.entry(channel.context.channel_id()) {
                        hash_map::Entry::Occupied(_) => {
                                self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
                                return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone()))
                        },
                        hash_map::Entry::Vacant(entry) => {
                                if !self.default_configuration.manually_accept_inbound_channels {
-                                       if channel.get_channel_type().requires_zero_conf() {
+                                       if channel.context.get_channel_type().requires_zero_conf() {
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone()));
                                        }
                                        peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
@@ -4932,7 +4932,7 @@ where
                                                counterparty_node_id: counterparty_node_id.clone(),
                                                funding_satoshis: msg.funding_satoshis,
                                                push_msat: msg.push_msat,
-                                               channel_type: channel.get_channel_type().clone(),
+                                               channel_type: channel.context.get_channel_type().clone(),
                                        }, None));
                                }
 
@@ -4955,7 +4955,7 @@ where
                        match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
                                hash_map::Entry::Occupied(mut chan) => {
                                        try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), chan);
-                                       (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id())
+                                       (chan.get().context.get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().context.get_user_id())
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
                        }
@@ -4996,14 +4996,14 @@ where
                                Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
                        },
                        hash_map::Entry::Vacant(e) => {
-                               match self.id_to_peer.lock().unwrap().entry(chan.channel_id()) {
+                               match self.id_to_peer.lock().unwrap().entry(chan.context.channel_id()) {
                                        hash_map::Entry::Occupied(_) => {
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close(
                                                        "The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
                                                        funding_msg.channel_id))
                                        },
                                        hash_map::Entry::Vacant(i_e) => {
-                                               i_e.insert(chan.get_counterparty_node_id());
+                                               i_e.insert(chan.context.get_counterparty_node_id());
                                        }
                                }
 
@@ -5053,7 +5053,7 @@ where
                        hash_map::Entry::Occupied(mut chan) => {
                                let monitor = try_chan_entry!(self,
                                        chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan);
-                               let update_res = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor);
+                               let update_res = self.chain_monitor.watch_channel(chan.get().context.get_funding_txo().unwrap(), monitor);
                                let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, per_peer_state, chan);
                                if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
                                        // We weren't able to watch the channel to begin with, so no updates should be made on
@@ -5083,7 +5083,7 @@ where
                                let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, &self.node_signer,
                                        self.genesis_hash.clone(), &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan);
                                if let Some(announcement_sigs) = announcement_sigs_opt {
-                                       log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id()));
+                                       log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().context.channel_id()));
                                        peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
                                                node_id: counterparty_node_id.clone(),
                                                msg: announcement_sigs,
@@ -5094,7 +5094,7 @@ where
                                        // counterparty's announcement_signatures. Thus, we only bother to send a
                                        // channel_update here if the channel is not public, i.e. we're not sending an
                                        // announcement_signatures.
-                                       log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().channel_id()));
+                                       log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().context.channel_id()));
                                        if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
                                                peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
                                                        node_id: counterparty_node_id.clone(),
@@ -5134,7 +5134,7 @@ where
                                                        if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
                                        }
 
-                                       let funding_txo_opt = chan_entry.get().get_funding_txo();
+                                       let funding_txo_opt = chan_entry.get().context.get_funding_txo();
                                        let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self,
                                                chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
                                        dropped_htlcs = htlcs;
@@ -5340,7 +5340,7 @@ where
                let peer_state = &mut *peer_state_lock;
                match peer_state.channel_by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan) => {
-                               let funding_txo = chan.get().get_funding_txo();
+                               let funding_txo = chan.get().context.get_funding_txo();
                                let monitor_update_opt = try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &self.logger), chan);
                                if let Some(monitor_update) = monitor_update_opt {
                                        let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
@@ -5479,7 +5479,7 @@ where
                        let peer_state = &mut *peer_state_lock;
                        match peer_state.channel_by_id.entry(msg.channel_id) {
                                hash_map::Entry::Occupied(mut chan) => {
-                                       let funding_txo = chan.get().get_funding_txo();
+                                       let funding_txo = chan.get().context.get_funding_txo();
                                        let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
                                        let res = if let Some(monitor_update) = monitor_update_opt {
                                                let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
@@ -5562,7 +5562,7 @@ where
                let peer_state = &mut *peer_state_lock;
                match peer_state.channel_by_id.entry(chan_id) {
                        hash_map::Entry::Occupied(mut chan) => {
-                               if chan.get().get_counterparty_node_id() != *counterparty_node_id {
+                               if chan.get().context.get_counterparty_node_id() != *counterparty_node_id {
                                        if chan.get().context.should_announce() {
                                                // If the announcement is about a channel of ours which is public, some
                                                // other peer may simply be forwarding all its gossip to us. Don't provide
@@ -5571,7 +5571,7 @@ where
                                        }
                                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
                                }
-                               let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().get_counterparty_node_id().serialize()[..];
+                               let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().context.get_counterparty_node_id().serialize()[..];
                                let msg_from_node_one = msg.contents.flags & 1 == 0;
                                if were_node_one == msg_from_node_one {
                                        return Ok(NotifyOption::SkipPersist);
@@ -5618,7 +5618,7 @@ where
                                                // they have the latest channel parameters.
                                                if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
                                                        channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
-                                                               node_id: chan.get().get_counterparty_node_id(),
+                                                               node_id: chan.get().context.get_counterparty_node_id(),
                                                                msg,
                                                        });
                                                }
@@ -5699,9 +5699,9 @@ where
                                                                        };
                                                                        self.issue_channel_close_events(&chan, reason);
                                                                        pending_msg_events.push(events::MessageSendEvent::HandleError {
-                                                                               node_id: chan.get_counterparty_node_id(),
+                                                                               node_id: chan.context.get_counterparty_node_id(),
                                                                                action: msgs::ErrorAction::SendErrorMessage {
-                                                                                       msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
+                                                                                       msg: msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() }
                                                                                },
                                                                        });
                                                                }
@@ -5750,8 +5750,8 @@ where
                                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                        let peer_state: &mut PeerState<_> = &mut *peer_state_lock;
                                        for (channel_id, chan) in peer_state.channel_by_id.iter_mut() {
-                                               let counterparty_node_id = chan.get_counterparty_node_id();
-                                               let funding_txo = chan.get_funding_txo();
+                                               let counterparty_node_id = chan.context.get_counterparty_node_id();
+                                               let funding_txo = chan.context.get_funding_txo();
                                                let (monitor_opt, holding_cell_failed_htlcs) =
                                                        chan.maybe_free_holding_cell_htlcs(&self.logger);
                                                if !holding_cell_failed_htlcs.is_empty() {
@@ -5810,7 +5810,7 @@ where
                                                        if let Some(msg) = msg_opt {
                                                                has_update = true;
                                                                pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
-                                                                       node_id: chan.get_counterparty_node_id(), msg,
+                                                                       node_id: chan.context.get_counterparty_node_id(), msg,
                                                                });
                                                        }
                                                        if let Some(tx) = tx_opt {
@@ -5833,7 +5833,7 @@ where
                                                Err(e) => {
                                                        has_update = true;
                                                        let (close_channel, res) = convert_chan_err!(self, e, chan, channel_id);
-                                                       handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
+                                                       handle_errors.push((chan.context.get_counterparty_node_id(), Err(res)));
                                                        !close_channel
                                                }
                                        }
@@ -6164,7 +6164,7 @@ where
                                }
 
                                if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) {
-                                       debug_assert_eq!(chan.get().get_funding_txo().unwrap(), channel_funding_outpoint);
+                                       debug_assert_eq!(chan.get().context.get_funding_txo().unwrap(), channel_funding_outpoint);
                                        if let Some((monitor_update, further_update_exists)) = chan.get_mut().unblock_next_blocked_monitor_update() {
                                                log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor",
                                                        log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
@@ -6420,7 +6420,7 @@ where
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
                        for chan in peer_state.channel_by_id.values() {
-                               if let (Some(funding_txo), Some(block_hash)) = (chan.get_funding_txo(), chan.get_funding_tx_confirmed_in()) {
+                               if let (Some(funding_txo), Some(block_hash)) = (chan.context.get_funding_txo(), chan.context.get_funding_tx_confirmed_in()) {
                                        res.push((funding_txo.txid, Some(block_hash)));
                                }
                        }
@@ -6432,7 +6432,7 @@ where
                let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
                        &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
                self.do_chain_event(None, |channel| {
-                       if let Some(funding_txo) = channel.get_funding_txo() {
+                       if let Some(funding_txo) = channel.context.get_funding_txo() {
                                if funding_txo.txid == *txid {
                                        channel.funding_transaction_unconfirmed(&self.logger).map(|()| (None, Vec::new(), None))
                                } else { Ok((None, Vec::new(), None)) }
@@ -6475,20 +6475,20 @@ where
                                                for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
                                                        let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
                                                        timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data),
-                                                               HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() }));
+                                                               HTLCDestination::NextHopChannel { node_id: Some(channel.context.get_counterparty_node_id()), channel_id: channel.context.channel_id() }));
                                                }
                                                if let Some(channel_ready) = channel_ready_opt {
                                                        send_channel_ready!(self, pending_msg_events, channel, channel_ready);
                                                        if channel.context.is_usable() {
-                                                               log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
+                                                               log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.context.channel_id()));
                                                                if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
                                                                        pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
-                                                                               node_id: channel.get_counterparty_node_id(),
+                                                                               node_id: channel.context.get_counterparty_node_id(),
                                                                                msg,
                                                                        });
                                                                }
                                                        } else {
-                                                               log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.channel_id()));
+                                                               log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.context.channel_id()));
                                                        }
                                                }
 
@@ -6498,9 +6498,9 @@ where
                                                }
 
                                                if let Some(announcement_sigs) = announcement_sigs {
-                                                       log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.channel_id()));
+                                                       log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.context.channel_id()));
                                                        pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
-                                                               node_id: channel.get_counterparty_node_id(),
+                                                               node_id: channel.context.get_counterparty_node_id(),
                                                                msg: announcement_sigs,
                                                        });
                                                        if let Some(height) = height_opt {
@@ -6515,7 +6515,7 @@ where
                                                        }
                                                }
                                                if channel.is_our_channel_ready() {
-                                                       if let Some(real_scid) = channel.get_short_channel_id() {
+                                                       if let Some(real_scid) = channel.context.get_short_channel_id() {
                                                                // If we sent a 0conf channel_ready, and now have an SCID, we add it
                                                                // to the short_to_chan_info map here. Note that we check whether we
                                                                // can relay using the real SCID at relay-time (i.e.
@@ -6523,8 +6523,8 @@ where
                                                                // un-confirmed we force-close the channel, ensuring short_to_chan_info
                                                                // is always consistent.
                                                                let mut short_to_chan_info = self.short_to_chan_info.write().unwrap();
-                                                               let scid_insert = short_to_chan_info.insert(real_scid, (channel.get_counterparty_node_id(), channel.channel_id()));
-                                                               assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.get_counterparty_node_id(), channel.channel_id()),
+                                                               let scid_insert = short_to_chan_info.insert(real_scid, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
+                                                               assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.context.get_counterparty_node_id(), channel.context.channel_id()),
                                                                        "SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels",
                                                                        fake_scid::MAX_SCID_BLOCKS_FROM_NOW);
                                                        }
@@ -6542,9 +6542,9 @@ where
                                                let reason_message = format!("{}", reason);
                                                self.issue_channel_close_events(channel, reason);
                                                pending_msg_events.push(events::MessageSendEvent::HandleError {
-                                                       node_id: channel.get_counterparty_node_id(),
+                                                       node_id: channel.context.get_counterparty_node_id(),
                                                        action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage {
-                                                               channel_id: channel.channel_id(),
+                                                               channel_id: channel.context.channel_id(),
                                                                data: reason_message,
                                                        } },
                                                });
@@ -6910,7 +6910,7 @@ where
                        let peer_state = &mut *peer_state_lock;
                        let pending_msg_events = &mut peer_state.pending_msg_events;
                        peer_state.channel_by_id.retain(|_, chan| {
-                               let retain = if chan.get_counterparty_node_id() == *counterparty_node_id {
+                               let retain = if chan.context.get_counterparty_node_id() == *counterparty_node_id {
                                        if !chan.context.have_received_message() {
                                                // If we created this (outbound) channel while we were disconnected from the
                                                // peer we probably failed to send the open_channel message, which is now
@@ -6919,13 +6919,13 @@ where
                                                false
                                        } else {
                                                pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
-                                                       node_id: chan.get_counterparty_node_id(),
+                                                       node_id: chan.context.get_counterparty_node_id(),
                                                        msg: chan.get_channel_reestablish(&self.logger),
                                                });
                                                true
                                        }
                                } else { true };
-                               if retain && chan.get_counterparty_node_id() != *counterparty_node_id {
+                               if retain && chan.context.get_counterparty_node_id() != *counterparty_node_id {
                                        if let Some(msg) = chan.get_signed_channel_announcement(&self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(), &self.default_configuration) {
                                                if let Ok(update_msg) = self.get_channel_update_for_broadcast(chan) {
                                                        pending_msg_events.push(events::MessageSendEvent::SendChannelAnnouncement {
@@ -7553,7 +7553,7 @@ where
                                }
                                number_of_channels += peer_state.channel_by_id.len();
                                for (_, channel) in peer_state.channel_by_id.iter() {
-                                       if !channel.is_funding_initiated() {
+                                       if !channel.context.is_funding_initiated() {
                                                unfunded_channels += 1;
                                        }
                                }
@@ -7565,7 +7565,7 @@ where
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
                                for (_, channel) in peer_state.channel_by_id.iter() {
-                                       if channel.is_funding_initiated() {
+                                       if channel.context.is_funding_initiated() {
                                                channel.write(writer)?;
                                        }
                                }
@@ -7851,7 +7851,7 @@ where
        pub default_config: UserConfig,
 
        /// A map from channel funding outpoints to ChannelMonitors for those channels (ie
-       /// value.get_funding_txo() should be the key).
+       /// value.context.get_funding_txo() should be the key).
        ///
        /// If a monitor is inconsistent with the channel state during deserialization the channel will
        /// be force-closed using the data in the ChannelMonitor and the channel will be dropped. This
@@ -7941,14 +7941,14 @@ where
                        let mut channel: Channel<<SP::Target as SignerProvider>::Signer> = Channel::read(reader, (
                                &args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
                        ))?;
-                       let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
+                       let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
                        funding_txo_set.insert(funding_txo.clone());
                        if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
                                if channel.get_latest_complete_monitor_update_id() > monitor.get_latest_update_id() {
                                        // If the channel is ahead of the monitor, return InvalidValue:
                                        log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
                                        log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
-                                               log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_complete_monitor_update_id());
+                                               log_bytes!(channel.context.channel_id()), monitor.get_latest_update_id(), channel.get_latest_complete_monitor_update_id());
                                        log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
                                        log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
                                        log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
@@ -7962,7 +7962,7 @@ where
                                        log_error!(args.logger, "A ChannelManager is stale compared to the current ChannelMonitor!");
                                        log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
                                        log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
-                                               log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id());
+                                               log_bytes!(channel.context.channel_id()), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id());
                                        let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true);
                                        if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
                                                pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
@@ -7971,8 +7971,8 @@ where
                                        }
                                        failed_htlcs.append(&mut new_failed_htlcs);
                                        channel_closures.push_back((events::Event::ChannelClosed {
-                                               channel_id: channel.channel_id(),
-                                               user_channel_id: channel.get_user_id(),
+                                               channel_id: channel.context.channel_id(),
+                                               user_channel_id: channel.context.get_user_id(),
                                                reason: ClosureReason::OutdatedChannelManager
                                        }, None));
                                        for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() {
@@ -7990,29 +7990,29 @@ where
                                                        // backwards leg of the HTLC will simply be rejected.
                                                        log_info!(args.logger,
                                                                "Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager",
-                                                               log_bytes!(channel.channel_id()), log_bytes!(payment_hash.0));
-                                                       failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.get_counterparty_node_id(), channel.channel_id()));
+                                                               log_bytes!(channel.context.channel_id()), log_bytes!(payment_hash.0));
+                                                       failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.context.get_counterparty_node_id(), channel.context.channel_id()));
                                                }
                                        }
                                } else {
                                        log_info!(args.logger, "Successfully loaded channel {} at update_id {} against monitor at update id {}",
-                                               log_bytes!(channel.channel_id()), channel.context.get_latest_monitor_update_id(),
+                                               log_bytes!(channel.context.channel_id()), channel.context.get_latest_monitor_update_id(),
                                                monitor.get_latest_update_id());
                                        channel.complete_all_mon_updates_through(monitor.get_latest_update_id());
-                                       if let Some(short_channel_id) = channel.get_short_channel_id() {
-                                               short_to_chan_info.insert(short_channel_id, (channel.get_counterparty_node_id(), channel.channel_id()));
+                                       if let Some(short_channel_id) = channel.context.get_short_channel_id() {
+                                               short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
                                        }
-                                       if channel.is_funding_initiated() {
-                                               id_to_peer.insert(channel.channel_id(), channel.get_counterparty_node_id());
+                                       if channel.context.is_funding_initiated() {
+                                               id_to_peer.insert(channel.context.channel_id(), channel.context.get_counterparty_node_id());
                                        }
-                                       match peer_channels.entry(channel.get_counterparty_node_id()) {
+                                       match peer_channels.entry(channel.context.get_counterparty_node_id()) {
                                                hash_map::Entry::Occupied(mut entry) => {
                                                        let by_id_map = entry.get_mut();
-                                                       by_id_map.insert(channel.channel_id(), channel);
+                                                       by_id_map.insert(channel.context.channel_id(), channel);
                                                },
                                                hash_map::Entry::Vacant(entry) => {
                                                        let mut by_id_map = HashMap::new();
-                                                       by_id_map.insert(channel.channel_id(), channel);
+                                                       by_id_map.insert(channel.context.channel_id(), channel);
                                                        entry.insert(by_id_map);
                                                }
                                        }
@@ -8023,12 +8023,12 @@ where
                                // safely discard the channel.
                                let _ = channel.force_shutdown(false);
                                channel_closures.push_back((events::Event::ChannelClosed {
-                                       channel_id: channel.channel_id(),
-                                       user_channel_id: channel.get_user_id(),
+                                       channel_id: channel.context.channel_id(),
+                                       user_channel_id: channel.context.get_user_id(),
                                        reason: ClosureReason::DisconnectedPeer,
                                }, None));
                        } else {
-                               log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.channel_id()));
+                               log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.context.channel_id()));
                                log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
                                log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
                                log_error!(args.logger, " Without the ChannelMonitor we cannot continue without risking funds.");
@@ -8117,7 +8117,7 @@ where
                        let peer_state = peer_mtx.lock().unwrap();
                        for (_, chan) in peer_state.channel_by_id.iter() {
                                for update in chan.uncompleted_unblocked_mon_updates() {
-                                       if let Some(funding_txo) = chan.get_funding_txo() {
+                                       if let Some(funding_txo) = chan.context.get_funding_txo() {
                                                log_trace!(args.logger, "Replaying ChannelMonitorUpdate {} for channel {}",
                                                        update.update_id, log_bytes!(funding_txo.to_channel_id()));
                                                pending_background_events.push(
@@ -8409,25 +8409,25 @@ where
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
                        for (chan_id, chan) in peer_state.channel_by_id.iter_mut() {
-                               if chan.outbound_scid_alias() == 0 {
+                               if chan.context.outbound_scid_alias() == 0 {
                                        let mut outbound_scid_alias;
                                        loop {
                                                outbound_scid_alias = fake_scid::Namespace::OutboundAlias
                                                        .get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source);
                                                if outbound_scid_aliases.insert(outbound_scid_alias) { break; }
                                        }
-                                       chan.set_outbound_scid_alias(outbound_scid_alias);
-                               } else if !outbound_scid_aliases.insert(chan.outbound_scid_alias()) {
+                                       chan.context.set_outbound_scid_alias(outbound_scid_alias);
+                               } else if !outbound_scid_aliases.insert(chan.context.outbound_scid_alias()) {
                                        // Note that in rare cases its possible to hit this while reading an older
                                        // channel if we just happened to pick a colliding outbound alias above.
-                                       log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias());
+                                       log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
                                        return Err(DecodeError::InvalidValue);
                                }
                                if chan.context.is_usable() {
-                                       if short_to_chan_info.insert(chan.outbound_scid_alias(), (chan.get_counterparty_node_id(), *chan_id)).is_some() {
+                                       if short_to_chan_info.insert(chan.context.outbound_scid_alias(), (chan.context.get_counterparty_node_id(), *chan_id)).is_some() {
                                                // Note that in rare cases it's possible to hit this while reading an older
                                                // channel if we just happened to pick a colliding outbound alias above.
-                                               log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias());
+                                               log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
                                                return Err(DecodeError::InvalidValue);
                                        }
                                }
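
For reviewers tracing the pattern: nothing about these accessors changed except their home, so every call site simply goes through the `context` field now. Below is a minimal sketch of that delegation and of the collision-avoiding alias assignment above, using illustrative types rather than the real LDK ones:

    use std::collections::HashSet;

    struct ChannelContext { outbound_scid_alias: u64 }

    impl ChannelContext {
        fn outbound_scid_alias(&self) -> u64 { self.outbound_scid_alias }

        // Only valid while the alias is still unset (0), mirroring the
        // assert-guarded setter that moved into `ChannelContext`.
        fn set_outbound_scid_alias(&mut self, alias: u64) {
            assert_eq!(self.outbound_scid_alias, 0);
            self.outbound_scid_alias = alias;
        }
    }

    struct Channel { context: ChannelContext }

    fn ensure_alias(chan: &mut Channel, taken: &mut HashSet<u64>, mut next_fake_scid: impl FnMut() -> u64) {
        if chan.context.outbound_scid_alias() == 0 {
            // Keep drawing fake SCIDs until one does not collide with an
            // alias already claimed by another channel.
            let alias = loop {
                let candidate = next_fake_scid();
                if taken.insert(candidate) { break candidate; }
            };
            chan.context.set_outbound_scid_alias(alias);
        }
    }
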
lightning/src/ln/functional_test_utils.rs
index 6d980632d1ac41b8d1a7be022db9d65dfba213fa..79210068f32260be8e31faa01473dd404f7a1e22 100644 (file)
@@ -2237,10 +2237,10 @@ pub fn do_claim_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>,
                                                let peer_state = per_peer_state.get(&$prev_node.node.get_our_node_id())
                                                        .unwrap().lock().unwrap();
                                                let channel = peer_state.channel_by_id.get(&next_msgs.as_ref().unwrap().0.channel_id).unwrap();
-                                               if let Some(prev_config) = channel.prev_config() {
+                                               if let Some(prev_config) = channel.context.prev_config() {
                                                        prev_config.forwarding_fee_base_msat
                                                } else {
-                                                       channel.config().forwarding_fee_base_msat
+                                                       channel.context.config().forwarding_fee_base_msat
                                                }
                                        };
                                        expect_payment_forwarded!($node, $next_node, $prev_node, Some(fee as u64), false, false);
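
The helper above prefers `prev_config()` over `config()` when computing the expected forwarding fee, presumably so that an HTLC forwarded before a config update finished propagating is checked against the fee it was actually charged. A simplified stand-in for that fallback (placeholder types, not LDK's):

    struct ChannelConfig { forwarding_fee_base_msat: u32 }

    struct ChannelContext {
        config: ChannelConfig,
        prev_config: Option<ChannelConfig>,
    }

    impl ChannelContext {
        // Use the previous config while one is still tracked; otherwise
        // fall back to the current config.
        fn expected_fee_base_msat(&self) -> u32 {
            match &self.prev_config {
                Some(prev) => prev.forwarding_fee_base_msat,
                None => self.config.forwarding_fee_base_msat,
            }
        }
    }
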
lightning/src/ln/functional_tests.rs
index 10225e099cb0a1aecfc252bc0184ad5700a1afb7..58977e7896419cd56fa58f53f56f5b59cf93cc12 100644 (file)
@@ -6172,7 +6172,7 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
                let channel = chan_lock.channel_by_id.get(&chan.2).unwrap();
-               htlc_minimum_msat = channel.get_holder_htlc_minimum_msat();
+               htlc_minimum_msat = channel.context.get_holder_htlc_minimum_msat();
        }
 
        let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], htlc_minimum_msat);
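
Note the braces around the lookup above: the per-peer-state read lock and the peer-state mutex are taken in a narrow scope so both guards drop before `get_route_and_payment_hash!` runs. A generic sketch of that scoping pattern, with placeholder types standing in for the test harness:

    use std::sync::{Mutex, RwLock};

    fn read_htlc_minimum(per_peer_state: &RwLock<Vec<Mutex<u64>>>) -> u64 {
        let htlc_minimum_msat;
        {
            let peers = per_peer_state.read().unwrap();
            let channel = peers[0].lock().unwrap();
            htlc_minimum_msat = *channel;
        } // both lock guards are released here
        htlc_minimum_msat
    }
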
lightning/src/ln/onion_route_tests.rs
index 36e1bd753e294272150a403b5d24c5bd339d819d..1aa3420caf503f3704e502abe248fa869c7feded 100644 (file)
@@ -510,7 +510,7 @@ fn test_onion_failure() {
        let short_channel_id = channels[1].0.contents.short_channel_id;
        let amt_to_forward = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id())
                .unwrap().lock().unwrap().channel_by_id.get(&channels[1].2).unwrap()
-               .get_counterparty_htlc_minimum_msat() - 1;
+               .context.get_counterparty_htlc_minimum_msat() - 1;
        let mut bogus_route = route.clone();
        let route_len = bogus_route.paths[0].hops.len();
        bogus_route.paths[0].hops[route_len-1].fee_msat = amt_to_forward;
lightning/src/ln/payment_tests.rs
index f514fa1ed97693733363ff8bfc8540e380efdf55..ab010b5a723b9057c6b119f40c03f6bb492133d7 100644 (file)
@@ -607,9 +607,9 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
                let mut peer_state = per_peer_state.get(&nodes[2].node.get_our_node_id())
                        .unwrap().lock().unwrap();
                let mut channel = peer_state.channel_by_id.get_mut(&chan_id_2).unwrap();
-               let mut new_config = channel.config();
+               let mut new_config = channel.context.config();
                new_config.forwarding_fee_base_msat += 100_000;
-               channel.update_config(&new_config);
+               channel.context.update_config(&new_config);
                new_route.paths[0].hops[0].fee_msat += 100_000;
        }
 
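
The config update above is a copy-modify-write: read a copy out via `config()`, bump the base fee, and hand it back through `update_config()`. A hypothetical mirror of that flow, including the `prev_config` bookkeeping that the fee fallback in functional_test_utils.rs relies on (illustrative types only):

    #[derive(Clone, Copy)]
    struct ChannelConfig { forwarding_fee_base_msat: u32 }

    struct ChannelContext {
        config: ChannelConfig,
        prev_config: Option<ChannelConfig>,
    }

    impl ChannelContext {
        fn config(&self) -> ChannelConfig { self.config }

        // Keep the old config around so anything negotiated under it can
        // still be validated against the old values.
        fn update_config(&mut self, new_config: &ChannelConfig) {
            self.prev_config = Some(self.config);
            self.config = *new_config;
        }
    }

    fn bump_base_fee(ctx: &mut ChannelContext) {
        let mut new_config = ctx.config();
        new_config.forwarding_fee_base_msat += 100_000;
        ctx.update_config(&new_config);
    }
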
@@ -1409,7 +1409,7 @@ fn test_trivial_inflight_htlc_tracking(){
                let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
-                       channel_1.get_short_channel_id().unwrap()
+                       channel_1.context.get_short_channel_id().unwrap()
                );
                assert_eq!(chan_1_used_liquidity, None);
        }
@@ -1421,7 +1421,7 @@ fn test_trivial_inflight_htlc_tracking(){
                let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) ,
                        &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
-                       channel_2.get_short_channel_id().unwrap()
+                       channel_2.context.get_short_channel_id().unwrap()
                );
 
                assert_eq!(chan_2_used_liquidity, None);
@@ -1446,7 +1446,7 @@ fn test_trivial_inflight_htlc_tracking(){
                let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
-                       channel_1.get_short_channel_id().unwrap()
+                       channel_1.context.get_short_channel_id().unwrap()
                );
                // First hop accounts for expected 1000 msat fee
                assert_eq!(chan_1_used_liquidity, Some(501000));
@@ -1459,7 +1459,7 @@ fn test_trivial_inflight_htlc_tracking(){
                let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) ,
                        &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
-                       channel_2.get_short_channel_id().unwrap()
+                       channel_2.context.get_short_channel_id().unwrap()
                );
 
                assert_eq!(chan_2_used_liquidity, Some(500000));
@@ -1485,7 +1485,7 @@ fn test_trivial_inflight_htlc_tracking(){
                let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
-                       channel_1.get_short_channel_id().unwrap()
+                       channel_1.context.get_short_channel_id().unwrap()
                );
                assert_eq!(chan_1_used_liquidity, None);
        }
@@ -1497,7 +1497,7 @@ fn test_trivial_inflight_htlc_tracking(){
                let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) ,
                        &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
-                       channel_2.get_short_channel_id().unwrap()
+                       channel_2.context.get_short_channel_id().unwrap()
                );
                assert_eq!(chan_2_used_liquidity, None);
        }
@@ -1538,7 +1538,7 @@ fn test_holding_cell_inflight_htlcs() {
                let used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
-                       channel.get_short_channel_id().unwrap()
+                       channel.context.get_short_channel_id().unwrap()
                );
 
                assert_eq!(used_liquidity, Some(2000000));
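
Across these tests, used liquidity is keyed by source node, target node, and short channel id, and the lookup returns `None` once nothing is in flight over that hop. An illustrative model of such a lookup (a sketch, not LDK's actual `InFlightHtlcs` internals):

    use std::collections::HashMap;

    // Placeholder for LDK's NodeId; any hashable key works for the sketch.
    type NodeId = [u8; 33];

    struct InFlightLiquidity(HashMap<(NodeId, NodeId, u64), u64>);

    impl InFlightLiquidity {
        fn used_liquidity_msat(&self, source: &NodeId, target: &NodeId, scid: u64) -> Option<u64> {
            self.0.get(&(*source, *target, scid)).copied()
        }

        // Accumulate the msat value of a newly in-flight HTLC on a hop.
        fn note_htlc(&mut self, source: NodeId, target: NodeId, scid: u64, msat: u64) {
            *self.0.entry((source, target, scid)).or_insert(0) += msat;
        }
    }
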