Merge pull request #2468 from jkczyz/2023-08-offer-payment-id
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Tue, 29 Aug 2023 19:29:21 +0000 (19:29 +0000)
committer GitHub <noreply@github.com>
Tue, 29 Aug 2023 19:29:21 +0000 (19:29 +0000)
Offer outbound payments

42 files changed:
ci/ci-tests.sh
fuzz/src/chanmon_consistency.rs
fuzz/src/chanmon_deser.rs
fuzz/src/full_stack.rs
fuzz/src/onion_message.rs
fuzz/src/router.rs
fuzz/src/utils/test_persister.rs
lightning-background-processor/src/lib.rs
lightning-block-sync/src/convert.rs
lightning-block-sync/src/gossip.rs [new file with mode: 0644]
lightning-block-sync/src/lib.rs
lightning-block-sync/src/rest.rs
lightning-block-sync/src/rpc.rs
lightning-invoice/src/utils.rs
lightning-net-tokio/Cargo.toml
lightning-net-tokio/src/lib.rs
lightning/src/chain/chainmonitor.rs
lightning/src/chain/channelmonitor.rs
lightning/src/chain/transaction.rs
lightning/src/events/mod.rs
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channel.rs
lightning/src/ln/channel_id.rs [new file with mode: 0644]
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/mod.rs
lightning/src/ln/monitor_tests.rs
lightning/src/ln/msgs.rs
lightning/src/ln/outbound_payment.rs
lightning/src/ln/payment_tests.rs
lightning/src/ln/peer_handler.rs
lightning/src/ln/reload_tests.rs
lightning/src/routing/gossip.rs
lightning/src/routing/router.rs
lightning/src/routing/scoring.rs
lightning/src/util/enforcing_trait_impls.rs [deleted file]
lightning/src/util/macro_logger.rs
lightning/src/util/mod.rs
lightning/src/util/test_channel_signer.rs [new file with mode: 0644]
lightning/src/util/test_utils.rs
pending_changelog/new_channel_id_type_pr_2485.txt [new file with mode: 0644]

index ef9ecf7b86d8653d5bdf1b7ab758032425bd33d6..8c675a654be9304d3066b2af4e776dc6492ab0b3 100755 (executable)
@@ -32,6 +32,9 @@ PIN_RELEASE_DEPS # pin the release dependencies in our main workspace
 # The proc-macro2 crate switched to Rust edition 2021 starting with v1.0.66, i.e., has MSRV of 1.56
 [ "$RUSTC_MINOR_VERSION" -lt 56 ] && cargo update -p proc-macro2 --precise "1.0.65" --verbose
 
+# The memchr crate switched to an MSRV of 1.60 starting with v2.6.0
+[ "$RUSTC_MINOR_VERSION" -lt 60 ] && cargo update -p memchr --precise "2.5.0" --verbose
+
 [ "$LDK_COVERAGE_BUILD" != "" ] && export RUSTFLAGS="-C link-dead-code"
 
 export RUST_BACKTRACE=1
index 296b3a03e9c1b85b8df7567efb93a6e3b3125937..83470bf8bc8e063075bce4a217b7de72fa9c1808 100644 (file)
@@ -46,7 +46,7 @@ use lightning::ln::script::ShutdownScript;
 use lightning::ln::functional_test_utils::*;
 use lightning::offers::invoice::UnsignedBolt12Invoice;
 use lightning::offers::invoice_request::UnsignedInvoiceRequest;
-use lightning::util::enforcing_trait_impls::{EnforcingSigner, EnforcementState};
+use lightning::util::test_channel_signer::{TestChannelSigner, EnforcementState};
 use lightning::util::errors::APIError;
 use lightning::util::logger::Logger;
 use lightning::util::config::UserConfig;
@@ -118,7 +118,7 @@ struct TestChainMonitor {
        pub logger: Arc<dyn Logger>,
        pub keys: Arc<KeyProvider>,
        pub persister: Arc<TestPersister>,
-       pub chain_monitor: Arc<chainmonitor::ChainMonitor<EnforcingSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
+       pub chain_monitor: Arc<chainmonitor::ChainMonitor<TestChannelSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
        // If we reload a node with an old copy of ChannelMonitors, the ChannelManager deserialization
        // logic will automatically force-close our channels for us (as we don't have an up-to-date
        // monitor implying we are not able to punish misbehaving counterparties). Because this test
@@ -139,8 +139,8 @@ impl TestChainMonitor {
                }
        }
 }
-impl chain::Watch<EnforcingSigner> for TestChainMonitor {
-       fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> chain::ChannelMonitorUpdateStatus {
+impl chain::Watch<TestChannelSigner> for TestChainMonitor {
+       fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<TestChannelSigner>) -> chain::ChannelMonitorUpdateStatus {
                let mut ser = VecWriter(Vec::new());
                monitor.write(&mut ser).unwrap();
                if let Some(_) = self.latest_monitors.lock().unwrap().insert(funding_txo, (monitor.get_latest_update_id(), ser.0)) {
@@ -156,7 +156,7 @@ impl chain::Watch<EnforcingSigner> for TestChainMonitor {
                        hash_map::Entry::Occupied(entry) => entry,
                        hash_map::Entry::Vacant(_) => panic!("Didn't have monitor on update call"),
                };
-               let deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::
+               let deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::
                        read(&mut Cursor::new(&map_entry.get().1), (&*self.keys, &*self.keys)).unwrap().1;
                deserialized_monitor.update_monitor(update, &&TestBroadcaster{}, &FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }, &self.logger).unwrap();
                let mut ser = VecWriter(Vec::new());
@@ -234,7 +234,7 @@ impl NodeSigner for KeyProvider {
 }
 
 impl SignerProvider for KeyProvider {
-       type Signer = EnforcingSigner;
+       type Signer = TestChannelSigner;
 
        fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
                let id = self.rand_bytes_id.fetch_add(1, atomic::Ordering::Relaxed) as u8;
@@ -257,7 +257,7 @@ impl SignerProvider for KeyProvider {
                        channel_keys_id,
                );
                let revoked_commitment = self.make_enforcement_state_cell(keys.commitment_seed);
-               EnforcingSigner::new_with_revoked(keys, revoked_commitment, false)
+               TestChannelSigner::new_with_revoked(keys, revoked_commitment, false)
        }
 
        fn read_chan_signer(&self, buffer: &[u8]) -> Result<Self::Signer, DecodeError> {
@@ -266,7 +266,7 @@ impl SignerProvider for KeyProvider {
                let inner: InMemorySigner = ReadableArgs::read(&mut reader, self)?;
                let state = self.make_enforcement_state_cell(inner.commitment_seed);
 
-               Ok(EnforcingSigner {
+               Ok(TestChannelSigner {
                        inner,
                        state,
                        disable_revocation_policy_check: false,
@@ -477,7 +477,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
                        let mut monitors = HashMap::new();
                        let mut old_monitors = $old_monitors.latest_monitors.lock().unwrap();
                        for (outpoint, (update_id, monitor_ser)) in old_monitors.drain() {
-                               monitors.insert(outpoint, <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut Cursor::new(&monitor_ser), (&*$keys_manager, &*$keys_manager)).expect("Failed to read monitor").1);
+                               monitors.insert(outpoint, <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(&mut Cursor::new(&monitor_ser), (&*$keys_manager, &*$keys_manager)).expect("Failed to read monitor").1);
                                chain_monitor.latest_monitors.lock().unwrap().insert(outpoint, (update_id, monitor_ser));
                        }
                        let mut monitor_refs = HashMap::new();
index 61744ace7c733419149455dc21ab70fe7e496b60..8d425357c96844b2b9766de612879750f4dceddc 100644 (file)
@@ -4,7 +4,7 @@
 use bitcoin::hash_types::BlockHash;
 
 use lightning::chain::channelmonitor;
-use lightning::util::enforcing_trait_impls::EnforcingSigner;
+use lightning::util::test_channel_signer::TestChannelSigner;
 use lightning::util::ser::{ReadableArgs, Writer, Writeable};
 use lightning::util::test_utils::OnlyReadsKeysInterface;
 
@@ -22,10 +22,10 @@ impl Writer for VecWriter {
 
 #[inline]
 pub fn do_test<Out: test_logger::Output>(data: &[u8], _out: Out) {
-       if let Ok((latest_block_hash, monitor)) = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(&mut Cursor::new(data), (&OnlyReadsKeysInterface {}, &OnlyReadsKeysInterface {})) {
+       if let Ok((latest_block_hash, monitor)) = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(&mut Cursor::new(data), (&OnlyReadsKeysInterface {}, &OnlyReadsKeysInterface {})) {
                let mut w = VecWriter(Vec::new());
                monitor.write(&mut w).unwrap();
-               let deserialized_copy = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(&mut Cursor::new(&w.0), (&OnlyReadsKeysInterface {}, &OnlyReadsKeysInterface {})).unwrap();
+               let deserialized_copy = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(&mut Cursor::new(&w.0), (&OnlyReadsKeysInterface {}, &OnlyReadsKeysInterface {})).unwrap();
                assert!(latest_block_hash == deserialized_copy.0);
                assert!(monitor == deserialized_copy.1);
        }
index cf8060ab6b647f73436c7dae1c7e216c3a400aad..a11e5c81368f5757be8081fc7cdbefb6bc6549a0 100644 (file)
@@ -34,7 +34,7 @@ use lightning::chain::chainmonitor;
 use lightning::chain::transaction::OutPoint;
 use lightning::sign::{InMemorySigner, Recipient, KeyMaterial, EntropySource, NodeSigner, SignerProvider};
 use lightning::events::Event;
-use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
+use lightning::ln::{ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
 use lightning::ln::channelmanager::{ChainParameters, ChannelDetails, ChannelManager, PaymentId, RecipientOnionFields, Retry};
 use lightning::ln::peer_handler::{MessageHandler,PeerManager,SocketDescriptor,IgnoringMessageHandler};
 use lightning::ln::msgs::{self, DecodeError};
@@ -47,7 +47,7 @@ use lightning::routing::utxo::UtxoLookup;
 use lightning::routing::router::{InFlightHtlcs, PaymentParameters, Route, RouteParameters, Router};
 use lightning::util::config::{UserConfig, MaxDustHTLCExposure};
 use lightning::util::errors::APIError;
-use lightning::util::enforcing_trait_impls::{EnforcingSigner, EnforcementState};
+use lightning::util::test_channel_signer::{TestChannelSigner, EnforcementState};
 use lightning::util::logger::Logger;
 use lightning::util::ser::{ReadableArgs, Writeable};
 
@@ -180,13 +180,13 @@ impl<'a> std::hash::Hash for Peer<'a> {
 }
 
 type ChannelMan<'a> = ChannelManager<
-       Arc<chainmonitor::ChainMonitor<EnforcingSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
+       Arc<chainmonitor::ChainMonitor<TestChannelSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
        Arc<TestBroadcaster>, Arc<KeyProvider>, Arc<KeyProvider>, Arc<KeyProvider>, Arc<FuzzEstimator>, &'a FuzzRouter, Arc<dyn Logger>>;
 type PeerMan<'a> = PeerManager<Peer<'a>, Arc<ChannelMan<'a>>, Arc<P2PGossipSync<Arc<NetworkGraph<Arc<dyn Logger>>>, Arc<dyn UtxoLookup>, Arc<dyn Logger>>>, IgnoringMessageHandler, Arc<dyn Logger>, IgnoringMessageHandler, Arc<KeyProvider>>;
 
 struct MoneyLossDetector<'a> {
        manager: Arc<ChannelMan<'a>>,
-       monitor: Arc<chainmonitor::ChainMonitor<EnforcingSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
+       monitor: Arc<chainmonitor::ChainMonitor<TestChannelSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
        handler: PeerMan<'a>,
 
        peers: &'a RefCell<[bool; 256]>,
@@ -200,7 +200,7 @@ struct MoneyLossDetector<'a> {
 impl<'a> MoneyLossDetector<'a> {
        pub fn new(peers: &'a RefCell<[bool; 256]>,
                   manager: Arc<ChannelMan<'a>>,
-                  monitor: Arc<chainmonitor::ChainMonitor<EnforcingSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
+                  monitor: Arc<chainmonitor::ChainMonitor<TestChannelSigner, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<TestPersister>>>,
                   handler: PeerMan<'a>) -> Self {
                MoneyLossDetector {
                        manager,
@@ -339,7 +339,7 @@ impl NodeSigner for KeyProvider {
 }
 
 impl SignerProvider for KeyProvider {
-       type Signer = EnforcingSigner;
+       type Signer = TestChannelSigner;
 
        fn generate_channel_keys_id(&self, inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
                let ctr = self.counter.fetch_add(1, Ordering::Relaxed) as u8;
@@ -351,7 +351,7 @@ impl SignerProvider for KeyProvider {
                let secp_ctx = Secp256k1::signing_only();
                let ctr = channel_keys_id[0];
                let (inbound, state) = self.signer_state.borrow().get(&ctr).unwrap().clone();
-               EnforcingSigner::new_with_revoked(if inbound {
+               TestChannelSigner::new_with_revoked(if inbound {
                        InMemorySigner::new(
                                &secp_ctx,
                                SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, ctr]).unwrap(),
@@ -380,11 +380,11 @@ impl SignerProvider for KeyProvider {
                }, state, false)
        }
 
-       fn read_chan_signer(&self, mut data: &[u8]) -> Result<EnforcingSigner, DecodeError> {
+       fn read_chan_signer(&self, mut data: &[u8]) -> Result<TestChannelSigner, DecodeError> {
                let inner: InMemorySigner = ReadableArgs::read(&mut data, self)?;
                let state = Arc::new(Mutex::new(EnforcementState::new()));
 
-               Ok(EnforcingSigner::new_with_revoked(
+               Ok(TestChannelSigner::new_with_revoked(
                        inner,
                        state,
                        false
@@ -481,7 +481,7 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
        let mut should_forward = false;
        let mut payments_received: Vec<PaymentHash> = Vec::new();
        let mut payments_sent = 0;
-       let mut pending_funding_generation: Vec<([u8; 32], PublicKey, u64, Script)> = Vec::new();
+       let mut pending_funding_generation: Vec<(ChannelId, PublicKey, u64, Script)> = Vec::new();
        let mut pending_funding_signatures = HashMap::new();
 
        loop {
index 0ffc090ea197514c8ab5eacf3e2a13030b11f759..d2e35cd45cdbabc7745abd4f49c8cdf94e793362 100644 (file)
@@ -11,7 +11,7 @@ use lightning::ln::msgs::{self, DecodeError, OnionMessageHandler};
 use lightning::ln::script::ShutdownScript;
 use lightning::offers::invoice::UnsignedBolt12Invoice;
 use lightning::offers::invoice_request::UnsignedInvoiceRequest;
-use lightning::util::enforcing_trait_impls::EnforcingSigner;
+use lightning::util::test_channel_signer::TestChannelSigner;
 use lightning::util::logger::Logger;
 use lightning::util::ser::{Readable, Writeable, Writer};
 use lightning::onion_message::{CustomOnionMessageContents, CustomOnionMessageHandler, Destination, MessageRouter, OffersMessage, OffersMessageHandler, OnionMessagePath, OnionMessenger};
@@ -174,7 +174,7 @@ impl NodeSigner for KeyProvider {
 }
 
 impl SignerProvider for KeyProvider {
-       type Signer = EnforcingSigner;
+       type Signer = TestChannelSigner;
 
        fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] { unreachable!() }
 
@@ -182,7 +182,7 @@ impl SignerProvider for KeyProvider {
                unreachable!()
        }
 
-       fn read_chan_signer(&self, _data: &[u8]) -> Result<EnforcingSigner, DecodeError> { unreachable!() }
+       fn read_chan_signer(&self, _data: &[u8]) -> Result<TestChannelSigner, DecodeError> { unreachable!() }
 
        fn get_destination_script(&self) -> Result<Script, ()> { unreachable!() }
 
index 6195e736618520bfaf9e4e87dca9ba2139988d8f..830f6f4e201cb3d91155dca468492d586133b031 100644 (file)
@@ -13,6 +13,7 @@ use bitcoin::hash_types::BlockHash;
 
 use lightning::blinded_path::{BlindedHop, BlindedPath};
 use lightning::chain::transaction::OutPoint;
+use lightning::ln::ChannelId;
 use lightning::ln::channelmanager::{self, ChannelDetails, ChannelCounterparty};
 use lightning::ln::features::{BlindedHopFeatures, Bolt12InvoiceFeatures};
 use lightning::ln::msgs;
@@ -210,7 +211,7 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                                                let rnid = node_pks.iter().skip(u16::from_be_bytes(get_slice!(2).try_into().unwrap()) as usize % node_pks.len()).next().unwrap();
                                                let capacity = u64::from_be_bytes(get_slice!(8).try_into().unwrap());
                                                $first_hops_vec.push(ChannelDetails {
-                                                       channel_id: [0; 32],
+                                                       channel_id: ChannelId::new_zero(),
                                                        counterparty: ChannelCounterparty {
                                                                node_id: *rnid,
                                                                features: channelmanager::provided_init_features(&UserConfig::default()),
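For orientation, a minimal sketch (not part of this patch) of the new `ChannelId` type that replaces raw `[u8; 32]` channel IDs throughout this diff; only `new_zero()` (used just above) and the `Display` formatting relied on by the logging changes further down are exercised:

```rust
use lightning::ln::ChannelId;

fn main() {
    // All-zero placeholder ID, as used for the fuzzer's synthetic ChannelDetails above.
    let channel_id = ChannelId::new_zero();
    // ChannelId implements Display, which is what the later log call sites rely on in
    // place of wrapping the raw bytes with `log_bytes!`.
    println!("channel {}", channel_id);
}
```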
index e3635297adb037500adafebd98708025c99fa762..89de25aa5e6a192786a36b8d97605dbabf86b25d 100644 (file)
@@ -2,19 +2,19 @@ use lightning::chain;
 use lightning::chain::{chainmonitor, channelmonitor};
 use lightning::chain::chainmonitor::MonitorUpdateId;
 use lightning::chain::transaction::OutPoint;
-use lightning::util::enforcing_trait_impls::EnforcingSigner;
+use lightning::util::test_channel_signer::TestChannelSigner;
 
 use std::sync::Mutex;
 
 pub struct TestPersister {
        pub update_ret: Mutex<chain::ChannelMonitorUpdateStatus>,
 }
-impl chainmonitor::Persist<EnforcingSigner> for TestPersister {
-       fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<EnforcingSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
+impl chainmonitor::Persist<TestChannelSigner> for TestPersister {
+       fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<TestChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
                self.update_ret.lock().unwrap().clone()
        }
 
-       fn update_persisted_channel(&self, _funding_txo: OutPoint, _update: Option<&channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<EnforcingSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
+       fn update_persisted_channel(&self, _funding_txo: OutPoint, _update: Option<&channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<TestChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
                self.update_ret.lock().unwrap().clone()
        }
 }
index 1ed6a2a8345bf8751ad6389e8c306804bedf1e91..8648920ec2c197be752ab4e8cc8959f08ee14d87 100644 (file)
@@ -34,7 +34,7 @@ use lightning::ln::peer_handler::APeerManager;
 use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
 use lightning::routing::utxo::UtxoLookup;
 use lightning::routing::router::Router;
-use lightning::routing::scoring::{Score, WriteableScore};
+use lightning::routing::scoring::{ScoreUpdate, WriteableScore};
 use lightning::util::logger::Logger;
 use lightning::util::persist::Persister;
 #[cfg(feature = "std")]
@@ -241,23 +241,27 @@ fn handle_network_graph_update<L: Deref>(
 fn update_scorer<'a, S: 'static + Deref<Target = SC> + Send + Sync, SC: 'a + WriteableScore<'a>>(
        scorer: &'a S, event: &Event
 ) -> bool {
-       let mut score = scorer.lock();
        match event {
                Event::PaymentPathFailed { ref path, short_channel_id: Some(scid), .. } => {
+                       let mut score = scorer.write_lock();
                        score.payment_path_failed(path, *scid);
                },
                Event::PaymentPathFailed { ref path, payment_failed_permanently: true, .. } => {
                        // Reached if the destination explicitly failed it back. We treat this as a successful probe
                        // because the payment made it all the way to the destination with sufficient liquidity.
+                       let mut score = scorer.write_lock();
                        score.probe_successful(path);
                },
                Event::PaymentPathSuccessful { path, .. } => {
+                       let mut score = scorer.write_lock();
                        score.payment_path_successful(path);
                },
                Event::ProbeSuccessful { path, .. } => {
+                       let mut score = scorer.write_lock();
                        score.probe_successful(path);
                },
                Event::ProbeFailed { path, short_channel_id: Some(scid), .. } => {
+                       let mut score = scorer.write_lock();
                        score.probe_failed(path, *scid);
                },
                _ => return false,
@@ -858,7 +862,7 @@ mod tests {
        use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
        use lightning::routing::gossip::{NetworkGraph, NodeId, P2PGossipSync};
        use lightning::routing::router::{DefaultRouter, Path, RouteHop};
-       use lightning::routing::scoring::{ChannelUsage, Score};
+       use lightning::routing::scoring::{ChannelUsage, ScoreUpdate, ScoreLookUp};
        use lightning::util::config::UserConfig;
        use lightning::util::ser::Writeable;
        use lightning::util::test_utils;
@@ -1033,12 +1037,14 @@ mod tests {
                fn write<W: lightning::util::ser::Writer>(&self, _: &mut W) -> Result<(), lightning::io::Error> { Ok(()) }
        }
 
-       impl Score for TestScorer {
+       impl ScoreLookUp for TestScorer {
                type ScoreParams = ();
                fn channel_penalty_msat(
                        &self, _short_channel_id: u64, _source: &NodeId, _target: &NodeId, _usage: ChannelUsage, _score_params: &Self::ScoreParams
                ) -> u64 { unimplemented!(); }
+       }
 
+       impl ScoreUpdate for TestScorer {
                fn payment_path_failed(&mut self, actual_path: &Path, actual_short_channel_id: u64) {
                        if let Some(expectations) = &mut self.event_expectations {
                                match expectations.pop_front().unwrap() {
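As a rough sketch (not part of this patch), a no-op scorer split across the two new traits; the `ScoreLookUp` signature matches the hunk above, while the `ScoreUpdate` method set is inferred from the `update_scorer` call sites and may not be exhaustive:

```rust
use lightning::routing::gossip::NodeId;
use lightning::routing::router::Path;
use lightning::routing::scoring::{ChannelUsage, ScoreLookUp, ScoreUpdate};

struct FlatScorer;

impl ScoreLookUp for FlatScorer {
    type ScoreParams = ();
    fn channel_penalty_msat(
        &self, _short_channel_id: u64, _source: &NodeId, _target: &NodeId,
        _usage: ChannelUsage, _score_params: &Self::ScoreParams
    ) -> u64 {
        0 // flat penalty: every channel looks equally attractive to the router
    }
}

impl ScoreUpdate for FlatScorer {
    // Methods mirroring the calls made by update_scorer above; a real scorer would
    // adjust its channel liquidity estimates here.
    fn payment_path_failed(&mut self, _path: &Path, _short_channel_id: u64) {}
    fn payment_path_successful(&mut self, _path: &Path) {}
    fn probe_failed(&mut self, _path: &Path, _short_channel_id: u64) {}
    fn probe_successful(&mut self, _path: &Path) {}
}
```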
index d6294e1d2a79518c05a674cae5ea86d41fc1faf7..bf9e9577619a3fd0a1a7fecc3d79cd070bd706e1 100644 (file)
@@ -13,8 +13,14 @@ use serde_json;
 use std::convert::From;
 use std::convert::TryFrom;
 use std::convert::TryInto;
+use std::str::FromStr;
 use bitcoin::hashes::Hash;
 
+impl TryInto<serde_json::Value> for JsonResponse {
+       type Error = std::io::Error;
+       fn try_into(self) -> Result<serde_json::Value, std::io::Error> { Ok(self.0) }
+}
+
 /// Conversion from `std::io::Error` into `BlockSourceError`.
 impl From<std::io::Error> for BlockSourceError {
        fn from(e: std::io::Error) -> BlockSourceError {
@@ -38,6 +44,17 @@ impl TryInto<Block> for BinaryResponse {
        }
 }
 
+/// Parses binary data as a block hash.
+impl TryInto<BlockHash> for BinaryResponse {
+       type Error = std::io::Error;
+
+       fn try_into(self) -> std::io::Result<BlockHash> {
+               BlockHash::from_slice(&self.0).map_err(|_|
+                       std::io::Error::new(std::io::ErrorKind::InvalidData, "bad block hash length")
+               )
+       }
+}
+
 /// Converts a JSON value into block header data. The JSON value may be an object representing a
 /// block header or an array of such objects. In the latter case, the first object is converted.
 impl TryInto<BlockHeaderData> for JsonResponse {
@@ -226,6 +243,46 @@ impl TryInto<Transaction> for JsonResponse {
        }
 }
 
+impl TryInto<BlockHash> for JsonResponse {
+       type Error = std::io::Error;
+
+       fn try_into(self) -> std::io::Result<BlockHash> {
+               match self.0.as_str() {
+                       None => Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "expected JSON string")),
+                       Some(hex_data) if hex_data.len() != 64 =>
+                               Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid hash length")),
+                       Some(hex_data) => BlockHash::from_str(hex_data)
+                               .map_err(|_| std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid hex data")),
+               }
+       }
+}
+
+/// The REST `getutxos` endpoint returns a whole pile of data we don't care about and one bit we do
+/// - whether the `hit bitmap` field had any entries. Thus we condense the result down into only
+/// that.
+pub(crate) struct GetUtxosResponse {
+       pub(crate) hit_bitmap_nonempty: bool
+}
+
+impl TryInto<GetUtxosResponse> for JsonResponse {
+       type Error = std::io::Error;
+
+       fn try_into(self) -> std::io::Result<GetUtxosResponse> {
+               let bitmap_str =
+                       self.0.as_object().ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "expected an object"))?
+                       .get("bitmap").ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "missing bitmap field"))?
+                       .as_str().ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "bitmap should be an str"))?;
+                       let mut hit_bitmap_nonempty = false;
+                       for c in bitmap_str.chars() {
+                               if c < '0' || c > '9' {
+                                       return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid byte"));
+                               }
+                               if c > '0' { hit_bitmap_nonempty = true; }
+                       }
+                       Ok(GetUtxosResponse { hit_bitmap_nonempty })
+       }
+}
+
 #[cfg(test)]
 pub(crate) mod tests {
        use super::*;
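A small standalone sketch (response shape assumed, mirroring the REST tests later in this diff) of what the `bitmap` handling above boils down to: any digit greater than '0' means at least one queried output is still unspent:

```rust
fn main() {
    // Truncated shape of a Bitcoin Core REST getutxos reply; only "bitmap" matters here.
    let response = serde_json::json!({"chainHeight": 1, "bitmap": "01", "utxos": []});
    let bitmap = response["bitmap"].as_str().expect("bitmap should be a string");
    // The real conversion also rejects non-digit characters; this sketch skips that check.
    let hit_bitmap_nonempty = bitmap.chars().any(|c| c > '0');
    assert!(hit_bitmap_nonempty);
}
```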
diff --git a/lightning-block-sync/src/gossip.rs b/lightning-block-sync/src/gossip.rs
new file mode 100644 (file)
index 0000000..37f4268
--- /dev/null
@@ -0,0 +1,319 @@
+//! When fetching gossip from peers, lightning nodes need to validate that gossip against the
+//! current UTXO set. This module defines an implementation of the LDK API required to do so
+//! against a [`BlockSource`] which implements a few additional methods for accessing the UTXO set.
+
+use crate::{AsyncBlockSourceResult, BlockData, BlockSource, BlockSourceError};
+
+use bitcoin::blockdata::block::Block;
+use bitcoin::blockdata::transaction::{TxOut, OutPoint};
+use bitcoin::hash_types::BlockHash;
+
+use lightning::sign::NodeSigner;
+
+use lightning::ln::peer_handler::{CustomMessageHandler, PeerManager, SocketDescriptor};
+use lightning::ln::msgs::{ChannelMessageHandler, OnionMessageHandler};
+
+use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
+use lightning::routing::utxo::{UtxoFuture, UtxoLookup, UtxoResult, UtxoLookupError};
+
+use lightning::util::logger::Logger;
+
+use std::sync::{Arc, Mutex};
+use std::collections::VecDeque;
+use std::future::Future;
+use std::ops::Deref;
+use std::pin::Pin;
+use std::task::Poll;
+
+/// A trait which extends [`BlockSource`] and can be queried to fetch the block at a given height
+/// as well as whether a given output is unspent (i.e. a member of the current UTXO set).
+///
+/// Note that while this is implementable for a [`BlockSource`] which returns filtered block data
+/// (i.e. [`BlockData::HeaderOnly`] for [`BlockSource::get_block`] requests), such an
+/// implementation will reject all gossip as it is not fully able to verify the UTXOs referenced.
+pub trait UtxoSource : BlockSource + 'static {
+       /// Fetches the block hash of the block at the given height.
+       ///
+       /// This will, in turn, be passed to [`BlockSource::get_block`] to fetch the block needed
+       /// for gossip validation.
+       fn get_block_hash_by_height<'a>(&'a self, block_height: u32) -> AsyncBlockSourceResult<'a, BlockHash>;
+
+       /// Returns true if the given output has *not* been spent, i.e. is a member of the current UTXO
+       /// set.
+       fn is_output_unspent<'a>(&'a self, outpoint: OutPoint) -> AsyncBlockSourceResult<'a, bool>;
+}
+
+/// A generic trait which is able to spawn futures in the background.
+///
+/// If the `tokio` feature is enabled, this is implemented for the `TokioSpawner` struct, which
+/// delegates to `tokio::spawn()`.
+pub trait FutureSpawner : Send + Sync + 'static {
+       /// Spawns the given future as a background task.
+       ///
+       /// This method MUST NOT block on the given future immediately.
+       fn spawn<T: Future<Output = ()> + Send + 'static>(&self, future: T);
+}
+
+#[cfg(feature = "tokio")]
+/// A trivial [`FutureSpawner`] which delegates to `tokio::spawn`.
+pub struct TokioSpawner;
+#[cfg(feature = "tokio")]
+impl FutureSpawner for TokioSpawner {
+       fn spawn<T: Future<Output = ()> + Send + 'static>(&self, future: T) {
+               tokio::spawn(future);
+       }
+}
+
+/// A trivial future which joins two other futures and polls them at the same time, returning only
+/// once both complete.
+pub(crate) struct Joiner<
+       A: Future<Output=Result<(BlockHash, Option<u32>), BlockSourceError>> + Unpin,
+       B: Future<Output=Result<BlockHash, BlockSourceError>> + Unpin,
+> {
+       pub a: A,
+       pub b: B,
+       a_res: Option<(BlockHash, Option<u32>)>,
+       b_res: Option<BlockHash>,
+}
+
+impl<
+       A: Future<Output=Result<(BlockHash, Option<u32>), BlockSourceError>> + Unpin,
+       B: Future<Output=Result<BlockHash, BlockSourceError>> + Unpin,
+> Joiner<A, B> {
+       fn new(a: A, b: B) -> Self { Self { a, b, a_res: None, b_res: None } }
+}
+
+impl<
+       A: Future<Output=Result<(BlockHash, Option<u32>), BlockSourceError>> + Unpin,
+       B: Future<Output=Result<BlockHash, BlockSourceError>> + Unpin,
+> Future for Joiner<A, B> {
+       type Output = Result<((BlockHash, Option<u32>), BlockHash), BlockSourceError>;
+       fn poll(mut self: Pin<&mut Self>, ctx: &mut core::task::Context<'_>) -> Poll<Self::Output> {
+               if self.a_res.is_none() {
+                       match Pin::new(&mut self.a).poll(ctx) {
+                               Poll::Ready(res) => {
+                                       if let Ok(ok) = res {
+                                               self.a_res = Some(ok);
+                                       } else {
+                                               return Poll::Ready(Err(res.unwrap_err()));
+                                       }
+                               },
+                               Poll::Pending => {},
+                       }
+               }
+               if self.b_res.is_none() {
+                       match Pin::new(&mut self.b).poll(ctx) {
+                               Poll::Ready(res) => {
+                                       if let Ok(ok) = res {
+                                               self.b_res = Some(ok);
+                                       } else {
+                                               return Poll::Ready(Err(res.unwrap_err()));
+                                       }
+
+                               },
+                               Poll::Pending => {},
+                       }
+               }
+               if let Some(b_res) = self.b_res {
+                       if let Some(a_res) = self.a_res {
+                               return Poll::Ready(Ok((a_res, b_res)))
+                       }
+               }
+               Poll::Pending
+       }
+}
+
+/// A struct which wraps a [`UtxoSource`] and a few LDK objects and implements the LDK
+/// [`UtxoLookup`] trait.
+///
+/// Note that if you're using this against a Bitcoin Core REST or RPC server, you likely wish to
+/// increase the `rpcworkqueue` setting in Bitcoin Core as LDK attempts to parallelize requests (a
+/// value of 1024 should more than suffice), and ensure you have sufficient file descriptors
+/// available on both Bitcoin Core and your LDK application for each request to hold its own
+/// connection.
+pub struct GossipVerifier<S: FutureSpawner,
+       Blocks: Deref + Send + Sync + 'static + Clone,
+       L: Deref + Send + Sync + 'static,
+       Descriptor: SocketDescriptor + Send + Sync + 'static,
+       CM: Deref + Send + Sync + 'static,
+       OM: Deref + Send + Sync + 'static,
+       CMH: Deref + Send + Sync + 'static,
+       NS: Deref + Send + Sync + 'static,
+> where
+       Blocks::Target: UtxoSource,
+       L::Target: Logger,
+       CM::Target: ChannelMessageHandler,
+       OM::Target: OnionMessageHandler,
+       CMH::Target: CustomMessageHandler,
+       NS::Target: NodeSigner,
+{
+       source: Blocks,
+       peer_manager: Arc<PeerManager<Descriptor, CM, Arc<P2PGossipSync<Arc<NetworkGraph<L>>, Self, L>>, OM, L, CMH, NS>>,
+       gossiper: Arc<P2PGossipSync<Arc<NetworkGraph<L>>, Self, L>>,
+       spawn: S,
+       block_cache: Arc<Mutex<VecDeque<(u32, Block)>>>,
+}
+
+const BLOCK_CACHE_SIZE: usize = 5;
+
+impl<S: FutureSpawner,
+       Blocks: Deref + Send + Sync + Clone,
+       L: Deref + Send + Sync,
+       Descriptor: SocketDescriptor + Send + Sync,
+       CM: Deref + Send + Sync,
+       OM: Deref + Send + Sync,
+       CMH: Deref + Send + Sync,
+       NS: Deref + Send + Sync,
+> GossipVerifier<S, Blocks, L, Descriptor, CM, OM, CMH, NS> where
+       Blocks::Target: UtxoSource,
+       L::Target: Logger,
+       CM::Target: ChannelMessageHandler,
+       OM::Target: OnionMessageHandler,
+       CMH::Target: CustomMessageHandler,
+       NS::Target: NodeSigner,
+{
+       /// Constructs a new [`GossipVerifier`].
+       ///
+       /// This is expected to be given to a [`P2PGossipSync`] (initially constructed with `None` for
+       /// the UTXO lookup) via [`P2PGossipSync::add_utxo_lookup`].
+       pub fn new(source: Blocks, spawn: S, gossiper: Arc<P2PGossipSync<Arc<NetworkGraph<L>>, Self, L>>, peer_manager: Arc<PeerManager<Descriptor, CM, Arc<P2PGossipSync<Arc<NetworkGraph<L>>, Self, L>>, OM, L, CMH, NS>>) -> Self {
+               Self {
+                       source, spawn, gossiper, peer_manager,
+                       block_cache: Arc::new(Mutex::new(VecDeque::with_capacity(BLOCK_CACHE_SIZE))),
+               }
+       }
+
+       async fn retrieve_utxo(
+               source: Blocks, block_cache: Arc<Mutex<VecDeque<(u32, Block)>>>, short_channel_id: u64
+       ) -> Result<TxOut, UtxoLookupError> {
+               let block_height = (short_channel_id >> 5 * 8) as u32; // block height is most significant three bytes
+               let transaction_index = ((short_channel_id >> 2 * 8) & 0xffffff) as u32;
+               let output_index = (short_channel_id & 0xffff) as u16;
+
+               let (outpoint, output);
+
+               'tx_found: loop { // Used as a simple goto
+                       macro_rules! process_block {
+                               ($block: expr) => { {
+                                       if transaction_index as usize >= $block.txdata.len() {
+                                               return Err(UtxoLookupError::UnknownTx);
+                                       }
+                                       let transaction = &$block.txdata[transaction_index as usize];
+                                       if output_index as usize >= transaction.output.len() {
+                                               return Err(UtxoLookupError::UnknownTx);
+                                       }
+
+                                       outpoint = OutPoint::new(transaction.txid(), output_index.into());
+                                       output = transaction.output[output_index as usize].clone();
+                               } }
+                       }
+                       {
+                               let recent_blocks = block_cache.lock().unwrap();
+                               for (height, block) in recent_blocks.iter() {
+                                       if *height == block_height {
+                                               process_block!(block);
+                                               break 'tx_found;
+                                       }
+                               }
+                       }
+
+                       let ((_, tip_height_opt), block_hash) =
+                               Joiner::new(source.get_best_block(), source.get_block_hash_by_height(block_height))
+                               .await
+                               .map_err(|_| UtxoLookupError::UnknownTx)?;
+                       if let Some(tip_height) = tip_height_opt {
+                               // If the block doesn't yet have five confirmations, error out.
+                               //
+                               // The BOLT spec requires nodes wait for six confirmations before announcing a
+                               // channel, and we give them one block of headroom in case we're delayed seeing a
+                               // block.
+                               if block_height + 5 > tip_height {
+                                       return Err(UtxoLookupError::UnknownTx);
+                               }
+                       }
+                       let block_data = source.get_block(&block_hash).await
+                               .map_err(|_| UtxoLookupError::UnknownTx)?;
+                       let block = match block_data {
+                               BlockData::HeaderOnly(_) => return Err(UtxoLookupError::UnknownTx),
+                               BlockData::FullBlock(block) => block,
+                       };
+                       process_block!(block);
+                       {
+                               let mut recent_blocks = block_cache.lock().unwrap();
+                               let mut insert = true;
+                               for (height, _) in recent_blocks.iter() {
+                                       if *height == block_height {
+                                               insert = false;
+                                       }
+                               }
+                               if insert {
+                                       if recent_blocks.len() >= BLOCK_CACHE_SIZE {
+                                               recent_blocks.pop_front();
+                                       }
+                                       recent_blocks.push_back((block_height, block));
+                               }
+                       }
+                       break 'tx_found;
+               };
+               let outpoint_unspent =
+                       source.is_output_unspent(outpoint).await.map_err(|_| UtxoLookupError::UnknownTx)?;
+               if outpoint_unspent {
+                       Ok(output)
+               } else {
+                       Err(UtxoLookupError::UnknownTx)
+               }
+       }
+}
+
+impl<S: FutureSpawner,
+       Blocks: Deref + Send + Sync + Clone,
+       L: Deref + Send + Sync,
+       Descriptor: SocketDescriptor + Send + Sync,
+       CM: Deref + Send + Sync,
+       OM: Deref + Send + Sync,
+       CMH: Deref + Send + Sync,
+       NS: Deref + Send + Sync,
+> Deref for GossipVerifier<S, Blocks, L, Descriptor, CM, OM, CMH, NS> where
+       Blocks::Target: UtxoSource,
+       L::Target: Logger,
+       CM::Target: ChannelMessageHandler,
+       OM::Target: OnionMessageHandler,
+       CMH::Target: CustomMessageHandler,
+       NS::Target: NodeSigner,
+{
+       type Target = Self;
+       fn deref(&self) -> &Self { self }
+}
+
+
+impl<S: FutureSpawner,
+       Blocks: Deref + Send + Sync + Clone,
+       L: Deref + Send + Sync,
+       Descriptor: SocketDescriptor + Send + Sync,
+       CM: Deref + Send + Sync,
+       OM: Deref + Send + Sync,
+       CMH: Deref + Send + Sync,
+       NS: Deref + Send + Sync,
+> UtxoLookup for GossipVerifier<S, Blocks, L, Descriptor, CM, OM, CMH, NS> where
+       Blocks::Target: UtxoSource,
+       L::Target: Logger,
+       CM::Target: ChannelMessageHandler,
+       OM::Target: OnionMessageHandler,
+       CMH::Target: CustomMessageHandler,
+       NS::Target: NodeSigner,
+{
+       fn get_utxo(&self, _genesis_hash: &BlockHash, short_channel_id: u64) -> UtxoResult {
+               let res = UtxoFuture::new();
+               let fut = res.clone();
+               let source = self.source.clone();
+               let gossiper = Arc::clone(&self.gossiper);
+               let block_cache = Arc::clone(&self.block_cache);
+               let pm = Arc::clone(&self.peer_manager);
+               self.spawn.spawn(async move {
+                       let res = Self::retrieve_utxo(source, block_cache, short_channel_id).await;
+                       fut.resolve(gossiper.network_graph(), &*gossiper, res);
+                       pm.process_events();
+               });
+               UtxoResult::Async(res)
+       }
+}
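For reference, a standalone sketch of the short channel ID unpacking that `retrieve_utxo` performs above: block height in the most significant three bytes, transaction index in the middle three, output index in the low two.

```rust
fn decompose_scid(short_channel_id: u64) -> (u32, u32, u16) {
    let block_height = (short_channel_id >> 40) as u32;                    // top three bytes
    let transaction_index = ((short_channel_id >> 16) & 0xff_ffff) as u32; // middle three bytes
    let output_index = (short_channel_id & 0xffff) as u16;                 // low two bytes
    (block_height, transaction_index, output_index)
}

fn main() {
    // e.g. a channel funded in block 700_000, transaction index 42, output 1
    let scid = (700_000u64 << 40) | (42u64 << 16) | 1u64;
    assert_eq!(decompose_scid(scid), (700_000, 42, 1));
}
```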
index 5c7c0dee8c10c4dddaf3ea630238db5e59387650..3561a1b5d769f19381dfdf1bec1e7749cdf8c2d8 100644 (file)
@@ -28,6 +28,8 @@ pub mod http;
 pub mod init;
 pub mod poll;
 
+pub mod gossip;
+
 #[cfg(feature = "rest-client")]
 pub mod rest;
 
index 4300893013c06d7a3ee81c715b256066f4d28333..5690da12ea0495c1c8062655a95b27e83611cfa8 100644 (file)
@@ -3,7 +3,10 @@
 
 use crate::{BlockData, BlockHeaderData, BlockSource, AsyncBlockSourceResult};
 use crate::http::{BinaryResponse, HttpEndpoint, HttpClient, JsonResponse};
+use crate::gossip::UtxoSource;
+use crate::convert::GetUtxosResponse;
 
+use bitcoin::OutPoint;
 use bitcoin::hash_types::BlockHash;
 use bitcoin::hashes::hex::ToHex;
 
@@ -60,11 +63,30 @@ impl BlockSource for RestClient {
        }
 }
 
+impl UtxoSource for RestClient {
+       fn get_block_hash_by_height<'a>(&'a self, block_height: u32) -> AsyncBlockSourceResult<'a, BlockHash> {
+               Box::pin(async move {
+                       let resource_path = format!("blockhashbyheight/{}.bin", block_height);
+                       Ok(self.request_resource::<BinaryResponse, _>(&resource_path).await?)
+               })
+       }
+
+       fn is_output_unspent<'a>(&'a self, outpoint: OutPoint) -> AsyncBlockSourceResult<'a, bool> {
+               Box::pin(async move {
+                       let resource_path = format!("getutxos/{}-{}.json", outpoint.txid.to_hex(), outpoint.vout);
+                       let utxo_result =
+                               self.request_resource::<JsonResponse, GetUtxosResponse>(&resource_path).await?;
+                       Ok(utxo_result.hit_bitmap_nonempty)
+               })
+       }
+}
+
 #[cfg(test)]
 mod tests {
        use super::*;
        use crate::http::BinaryResponse;
        use crate::http::client_tests::{HttpServer, MessageBody};
+       use bitcoin::hashes::Hash;
 
        /// Parses binary data as a string-encoded `u32`.
        impl TryInto<u32> for BinaryResponse {
@@ -113,4 +135,32 @@ mod tests {
                        Ok(n) => assert_eq!(n, 42),
                }
        }
+
+       #[tokio::test]
+       async fn parses_negative_getutxos() {
+               let server = HttpServer::responding_with_ok(MessageBody::Content(
+                       // A real response contains a few more fields, but we actually only look at the
+                       // "bitmap" field, so this should suffice for testing
+                       "{\"chainHeight\": 1, \"bitmap\":\"0\",\"utxos\":[]}"
+               ));
+               let client = RestClient::new(server.endpoint()).unwrap();
+
+               let outpoint = OutPoint::new(bitcoin::Txid::from_inner([0; 32]), 0);
+               let unspent_output = client.is_output_unspent(outpoint).await.unwrap();
+               assert_eq!(unspent_output, false);
+       }
+
+       #[tokio::test]
+       async fn parses_positive_getutxos() {
+               let server = HttpServer::responding_with_ok(MessageBody::Content(
+                       // A real response contains lots more data, but we actually only look at the "bitmap"
+                       // field, so this should suffice for testing
+                       "{\"chainHeight\": 1, \"bitmap\":\"1\",\"utxos\":[]}"
+               ));
+               let client = RestClient::new(server.endpoint()).unwrap();
+
+               let outpoint = OutPoint::new(bitcoin::Txid::from_inner([0; 32]), 0);
+               let unspent_output = client.is_output_unspent(outpoint).await.unwrap();
+               assert_eq!(unspent_output, true);
+       }
 }
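A hedged usage sketch of the new `UtxoSource` impl; the endpoint construction and the locally running bitcoind with `-rest` enabled are assumptions for illustration, not part of the patch:

```rust
use bitcoin::hashes::Hash;
use lightning_block_sync::gossip::UtxoSource;
use lightning_block_sync::http::HttpEndpoint;
use lightning_block_sync::rest::RestClient;

#[tokio::main]
async fn main() {
    // Assumed local bitcoind REST endpoint.
    let endpoint = HttpEndpoint::for_host("127.0.0.1".to_string()).with_port(8332);
    let client = RestClient::new(endpoint).unwrap();
    // All-zero outpoint, as in the tests above; a real caller would pass a funding output.
    let outpoint = bitcoin::OutPoint::new(bitcoin::Txid::from_inner([0; 32]), 0);
    match client.is_output_unspent(outpoint).await {
        Ok(unspent) => println!("unspent: {}", unspent),
        Err(_) => eprintln!("lookup failed"),
    }
}
```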
index 4c4706cb1cd584e201fe1464d20abe5015dbe7bc..0ad94040acaf0cfcfd326588ecedc35ed1a00a19 100644 (file)
@@ -3,9 +3,11 @@
 
 use crate::{BlockData, BlockHeaderData, BlockSource, AsyncBlockSourceResult};
 use crate::http::{HttpClient, HttpEndpoint, HttpError, JsonResponse};
+use crate::gossip::UtxoSource;
 
 use bitcoin::hash_types::BlockHash;
 use bitcoin::hashes::hex::ToHex;
+use bitcoin::OutPoint;
 
 use std::sync::Mutex;
 
@@ -105,12 +107,13 @@ impl RpcClient {
                        return Err(std::io::Error::new(std::io::ErrorKind::Other, rpc_error));
                }
 
-               let result = &mut response["result"];
-               if result.is_null() {
-                       return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "expected JSON result"));
-               }
+               let result = match response.get_mut("result") {
+                       Some(result) => result.take(),
+                       None =>
+                               return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "expected JSON result")),
+               };
 
-               JsonResponse(result.take()).try_into()
+               JsonResponse(result).try_into()
        }
 }
 
@@ -137,11 +140,33 @@ impl BlockSource for RpcClient {
        }
 }
 
+impl UtxoSource for RpcClient {
+       fn get_block_hash_by_height<'a>(&'a self, block_height: u32) -> AsyncBlockSourceResult<'a, BlockHash> {
+               Box::pin(async move {
+                       let height_param = serde_json::json!(block_height);
+                       Ok(self.call_method("getblockhash", &[height_param]).await?)
+               })
+       }
+
+       fn is_output_unspent<'a>(&'a self, outpoint: OutPoint) -> AsyncBlockSourceResult<'a, bool> {
+               Box::pin(async move {
+                       let txid_param = serde_json::json!(outpoint.txid.to_hex());
+                       let vout_param = serde_json::json!(outpoint.vout);
+                       let include_mempool = serde_json::json!(false);
+                       let utxo_opt: serde_json::Value = self.call_method(
+                               "gettxout", &[txid_param, vout_param, include_mempool]).await?;
+                       Ok(!utxo_opt.is_null())
+               })
+       }
+}
+
 #[cfg(test)]
 mod tests {
        use super::*;
        use crate::http::client_tests::{HttpServer, MessageBody};
 
+       use bitcoin::hashes::Hash;
+
        /// Credentials encoded in base64.
        const CREDENTIALS: &'static str = "dXNlcjpwYXNzd29yZA==";
 
@@ -205,7 +230,7 @@ mod tests {
 
        #[tokio::test]
        async fn call_method_returning_missing_result() {
-               let response = serde_json::json!({ "result": null });
+               let response = serde_json::json!({  });
                let server = HttpServer::responding_with_ok(MessageBody::Content(response));
                let client = RpcClient::new(CREDENTIALS, server.endpoint()).unwrap();
 
@@ -244,4 +269,24 @@ mod tests {
                        Ok(count) => assert_eq!(count, 654470),
                }
        }
+
+       #[tokio::test]
+       async fn fails_to_fetch_spent_utxo() {
+               let response = serde_json::json!({ "result": null });
+               let server = HttpServer::responding_with_ok(MessageBody::Content(response));
+               let client = RpcClient::new(CREDENTIALS, server.endpoint()).unwrap();
+               let outpoint = OutPoint::new(bitcoin::Txid::from_inner([0; 32]), 0);
+               let unspent_output = client.is_output_unspent(outpoint).await.unwrap();
+               assert_eq!(unspent_output, false);
+       }
+
+       #[tokio::test]
+       async fn fetches_utxo() {
+               let response = serde_json::json!({ "result": {"bestblock": 1, "confirmations": 42}});
+               let server = HttpServer::responding_with_ok(MessageBody::Content(response));
+               let client = RpcClient::new(CREDENTIALS, server.endpoint()).unwrap();
+               let outpoint = OutPoint::new(bitcoin::Txid::from_inner([0; 32]), 0);
+               let unspent_output = client.is_output_unspent(outpoint).await.unwrap();
+               assert_eq!(unspent_output, true);
+       }
 }
index cc38e67c24325233919a65de4232ab156029323b..744c2654cd84705965999d09f602e4c17f2de3dd 100644 (file)
@@ -627,7 +627,7 @@ where
        log_trace!(logger, "Considering {} channels for invoice route hints", channels.len());
        for channel in channels.into_iter().filter(|chan| chan.is_channel_ready) {
                if channel.get_inbound_payment_scid().is_none() || channel.counterparty.forwarding_info.is_none() {
-                       log_trace!(logger, "Ignoring channel {} for invoice route hints", log_bytes!(channel.channel_id));
+                       log_trace!(logger, "Ignoring channel {} for invoice route hints", &channel.channel_id);
                        continue;
                }
 
@@ -641,7 +641,7 @@ where
                                // If any public channel exists, return no hints and let the sender
                                // look at the public channels instead.
                                log_trace!(logger, "Not including channels in invoice route hints on account of public channel {}",
-                                       log_bytes!(channel.channel_id));
+                                       &channel.channel_id);
                                return vec![].into_iter().take(MAX_CHANNEL_HINTS).map(route_hint_from_channel);
                        }
                }
@@ -681,18 +681,18 @@ where
                                        log_trace!(logger,
                                                "Preferring counterparty {} channel {} (SCID {:?}, {} msats) over {} (SCID {:?}, {} msats) for invoice route hints",
                                                log_pubkey!(channel.counterparty.node_id),
-                                               log_bytes!(channel.channel_id), channel.short_channel_id,
+                                               &channel.channel_id, channel.short_channel_id,
                                                channel.inbound_capacity_msat,
-                                               log_bytes!(entry.get().channel_id), entry.get().short_channel_id,
+                                               &entry.get().channel_id, entry.get().short_channel_id,
                                                current_max_capacity);
                                        entry.insert(channel);
                                } else {
                                        log_trace!(logger,
                                                "Preferring counterparty {} channel {} (SCID {:?}, {} msats) over {} (SCID {:?}, {} msats) for invoice route hints",
                                                log_pubkey!(channel.counterparty.node_id),
-                                               log_bytes!(entry.get().channel_id), entry.get().short_channel_id,
+                                               &entry.get().channel_id, entry.get().short_channel_id,
                                                current_max_capacity,
-                                               log_bytes!(channel.channel_id), channel.short_channel_id,
+                                               &channel.channel_id, channel.short_channel_id,
                                                channel.inbound_capacity_msat);
                                }
                        }
@@ -731,14 +731,14 @@ where
 
                        if include_channel {
                                log_trace!(logger, "Including channel {} in invoice route hints",
-                                       log_bytes!(channel.channel_id));
+                                       &channel.channel_id);
                        } else if !has_enough_capacity {
                                log_trace!(logger, "Ignoring channel {} without enough capacity for invoice route hints",
-                                       log_bytes!(channel.channel_id));
+                                       &channel.channel_id);
                        } else {
                                debug_assert!(!channel.is_usable || (has_pub_unconf_chan && !channel.is_public));
                                log_trace!(logger, "Ignoring channel {} with disconnected peer",
-                                       log_bytes!(channel.channel_id));
+                                       &channel.channel_id);
                        }
 
                        include_channel
index c2befdae51cbf5d44d22014cb364c4fde39a6661..d2ac6e5474543db1392926dc8dfebf07715b0d86 100644 (file)
@@ -17,8 +17,8 @@ rustdoc-args = ["--cfg", "docsrs"]
 [dependencies]
 bitcoin = "0.29.0"
 lightning = { version = "0.0.116", path = "../lightning" }
-tokio = { version = "1.0", features = [ "io-util", "rt", "sync", "net", "time" ] }
+tokio = { version = "1.0", features = [ "rt", "sync", "net", "time" ] }
 
 [dev-dependencies]
-tokio = { version = "1.14", features = [ "io-util", "macros", "rt", "rt-multi-thread", "sync", "net", "time" ] }
+tokio = { version = "1.14", features = [ "macros", "rt", "rt-multi-thread", "sync", "net", "time" ] }
 lightning = { version = "0.0.116", path = "../lightning", features = ["_test_utils"] }
index d6d8004164bf9ab54db94ee64b55ef63f5f57e74..6e2ea3f14c14f54b890f8ff504e4f3fe6dd0a304 100644 (file)
 
 use bitcoin::secp256k1::PublicKey;
 
-use tokio::net::TcpStream;
+use tokio::net::{tcp, TcpStream};
 use tokio::{io, time};
 use tokio::sync::mpsc;
-use tokio::io::{AsyncReadExt, AsyncWrite, AsyncWriteExt};
+use tokio::io::AsyncWrite;
 
 use lightning::ln::peer_handler;
 use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait;
@@ -59,7 +59,7 @@ static ID_COUNTER: AtomicU64 = AtomicU64::new(0);
 // define a trivial two- and three- select macro with the specific types we need and just use that.
 
 pub(crate) enum SelectorOutput {
-       A(Option<()>), B(Option<()>), C(tokio::io::Result<usize>),
+       A(Option<()>), B(Option<()>), C(tokio::io::Result<()>),
 }
 
 pub(crate) struct TwoSelector<
@@ -87,7 +87,7 @@ impl<
 }
 
 pub(crate) struct ThreeSelector<
-       A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin, C: Future<Output=tokio::io::Result<usize>> + Unpin
+       A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin, C: Future<Output=tokio::io::Result<()>> + Unpin
 > {
        pub a: A,
        pub b: B,
@@ -95,7 +95,7 @@ pub(crate) struct ThreeSelector<
 }
 
 impl<
-       A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin, C: Future<Output=tokio::io::Result<usize>> + Unpin
+       A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin, C: Future<Output=tokio::io::Result<()>> + Unpin
 > Future for ThreeSelector<A, B, C> {
        type Output = SelectorOutput;
        fn poll(mut self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll<SelectorOutput> {
@@ -119,7 +119,7 @@ impl<
 /// Connection object (in an Arc<Mutex<>>) in each SocketDescriptor we create as well as in the
 /// read future (which is returned by schedule_read).
 struct Connection {
-       writer: Option<io::WriteHalf<TcpStream>>,
+       writer: Option<Arc<TcpStream>>,
        // Because our PeerManager is templated by user-provided types, and we can't (as far as I can
        // tell) have a const RawWakerVTable built out of templated functions, we need some indirection
        // between being woken up with write-ready and calling PeerManager::write_buffer_space_avail.
@@ -156,7 +156,7 @@ impl Connection {
        async fn schedule_read<PM: Deref + 'static + Send + Sync + Clone>(
                peer_manager: PM,
                us: Arc<Mutex<Self>>,
-               mut reader: io::ReadHalf<TcpStream>,
+               reader: Arc<TcpStream>,
                mut read_wake_receiver: mpsc::Receiver<()>,
                mut write_avail_receiver: mpsc::Receiver<()>,
        ) where PM::Target: APeerManager<Descriptor = SocketDescriptor> {
@@ -200,7 +200,7 @@ impl Connection {
                                ThreeSelector {
                                        a: Box::pin(write_avail_receiver.recv()),
                                        b: Box::pin(read_wake_receiver.recv()),
-                                       c: Box::pin(reader.read(&mut buf)),
+                                       c: Box::pin(reader.readable()),
                                }.await
                        };
                        match select_result {
@@ -211,8 +211,9 @@ impl Connection {
                                        }
                                },
                                SelectorOutput::B(_) => {},
-                               SelectorOutput::C(read) => {
-                                       match read {
+                               SelectorOutput::C(res) => {
+                                       if res.is_err() { break Disconnect::PeerDisconnected; }
+                                       match reader.try_read(&mut buf) {
                                                Ok(0) => break Disconnect::PeerDisconnected,
                                                Ok(len) => {
                                                        let read_res = peer_manager.as_ref().read_event(&mut our_descriptor, &buf[0..len]);
@@ -226,7 +227,11 @@ impl Connection {
                                                                Err(_) => break Disconnect::CloseConnection,
                                                        }
                                                },
-                                               Err(_) => break Disconnect::PeerDisconnected,
+                                               Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
+                                                       // readable() is allowed to spuriously wake, so we have to handle
+                                                       // WouldBlock here.
+                                               },
+                                               Err(_) => break Disconnect::PeerDisconnected,
                                        }
                                },
                        }
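
The read path now waits on readable() and then calls try_read(), so a spurious wakeup surfaces as WouldBlock instead of blocking inside a read future. A standalone sketch of that loop (assumes tokio with the net feature; the function name is illustrative):

use tokio::net::TcpStream;

// Readiness-based read loop: wait until the socket reports readable, then try_read(),
// treating WouldBlock as a spurious wakeup and zero bytes as the peer hanging up.
async fn read_loop(stream: &TcpStream) -> std::io::Result<()> {
	let mut buf = [0u8; 4096];
	loop {
		stream.readable().await?;
		match stream.try_read(&mut buf) {
			Ok(0) => return Ok(()), // EOF: the peer closed the connection
			Ok(len) => println!("read {} bytes", len), // hand &buf[..len] to the application
			Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => continue,
			Err(e) => return Err(e),
		}
	}
}
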
@@ -239,18 +244,14 @@ impl Connection {
                        // here.
                        let _ = tokio::task::yield_now().await;
                };
-               let writer_option = us.lock().unwrap().writer.take();
-               if let Some(mut writer) = writer_option {
-                       // If the socket is already closed, shutdown() will fail, so just ignore it.
-                       let _ = writer.shutdown().await;
-               }
+               us.lock().unwrap().writer.take();
                if let Disconnect::PeerDisconnected = disconnect_type {
                        peer_manager.as_ref().socket_disconnected(&our_descriptor);
                        peer_manager.as_ref().process_events();
                }
        }
 
-       fn new(stream: StdTcpStream) -> (io::ReadHalf<TcpStream>, mpsc::Receiver<()>, mpsc::Receiver<()>, Arc<Mutex<Self>>) {
+       fn new(stream: StdTcpStream) -> (Arc<TcpStream>, mpsc::Receiver<()>, mpsc::Receiver<()>, Arc<Mutex<Self>>) {
                // We only ever need a channel of depth 1 here: if we returned a non-full write to the
                // PeerManager, we will eventually get notified that there is room in the socket to write
                // new bytes, which will generate an event. That event will be popped off the queue before
@@ -262,11 +263,11 @@ impl Connection {
                // false.
                let (read_waker, read_receiver) = mpsc::channel(1);
                stream.set_nonblocking(true).unwrap();
-               let (reader, writer) = io::split(TcpStream::from_std(stream).unwrap());
+               let tokio_stream = Arc::new(TcpStream::from_std(stream).unwrap());
 
-               (reader, write_receiver, read_receiver,
+               (Arc::clone(&tokio_stream), write_receiver, read_receiver,
                Arc::new(Mutex::new(Self {
-                       writer: Some(writer), write_avail, read_waker, read_paused: false,
+                       writer: Some(tokio_stream), write_avail, read_waker, read_paused: false,
                        rl_requested_disconnect: false,
                        id: ID_COUNTER.fetch_add(1, Ordering::AcqRel)
                })))
@@ -462,9 +463,9 @@ impl SocketDescriptor {
 }
 impl peer_handler::SocketDescriptor for SocketDescriptor {
        fn send_data(&mut self, data: &[u8], resume_read: bool) -> usize {
-               // To send data, we take a lock on our Connection to access the WriteHalf of the TcpStream,
-               // writing to it if there's room in the kernel buffer, or otherwise create a new Waker with
-               // SocketDescriptor in it which can wake up the write_avail Sender, waking up the
+               // To send data, we take a lock on our Connection to access the TcpStream, writing to it if
+               // there's room in the kernel buffer, or otherwise create a new Waker with a
+               // SocketDescriptor in it which can wake up the write_avail Sender, waking up the
                // processing future which will call write_buffer_space_avail and we'll end up back here.
                let mut us = self.conn.lock().unwrap();
                if us.writer.is_none() {
@@ -484,24 +485,18 @@ impl peer_handler::SocketDescriptor for SocketDescriptor {
                let mut ctx = task::Context::from_waker(&waker);
                let mut written_len = 0;
                loop {
-                       match std::pin::Pin::new(us.writer.as_mut().unwrap()).poll_write(&mut ctx, &data[written_len..]) {
-                               task::Poll::Ready(Ok(res)) => {
-                                       // The tokio docs *seem* to indicate this can't happen, and I certainly don't
-                                       // know how to handle it if it does (cause it should be a Poll::Pending
-                                       // instead):
-                                       assert_ne!(res, 0);
-                                       written_len += res;
-                                       if written_len == data.len() { return written_len; }
-                               },
-                               task::Poll::Ready(Err(e)) => {
-                                       // The tokio docs *seem* to indicate this can't happen, and I certainly don't
-                                       // know how to handle it if it does (cause it should be a Poll::Pending
-                                       // instead):
-                                       assert_ne!(e.kind(), io::ErrorKind::WouldBlock);
-                                       // Probably we've already been closed, just return what we have and let the
-                                       // read thread handle closing logic.
-                                       return written_len;
+                       match us.writer.as_ref().unwrap().poll_write_ready(&mut ctx) {
+                               task::Poll::Ready(Ok(())) => {
+                                       match us.writer.as_ref().unwrap().try_write(&data[written_len..]) {
+                                               Ok(res) => {
+                                                       debug_assert_ne!(res, 0);
+                                                       written_len += res;
+                                                       if written_len == data.len() { return written_len; }
+                                               },
+                                               Err(_) => return written_len,
+                                       }
                                },
+                               task::Poll::Ready(Err(_)) => return written_len,
                                task::Poll::Pending => {
                                        // We're queued up for a write event now, but we need to make sure we also
                                        // pause read given we're now waiting on the remote end to ACK (and in
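
Similarly, the write path now checks readiness with poll_write_ready() and writes with try_write(), instead of polling an io::WriteHalf. A standalone sketch of that shape (illustrative helper, assumes tokio with the net feature):

use std::task::{Context, Poll};
use tokio::net::TcpStream;

// Write as much of `data` as the socket accepts right now, returning the number of bytes
// written; Poll::Pending means the waker in `cx` fires once the socket becomes writable.
fn write_some(stream: &TcpStream, data: &[u8], cx: &mut Context<'_>) -> usize {
	let mut written = 0;
	loop {
		match stream.poll_write_ready(cx) {
			Poll::Ready(Ok(())) => match stream.try_write(&data[written..]) {
				Ok(len) => {
					written += len;
					if written == data.len() { return written; }
				},
				// Readiness can be spurious, so try_write() may still report WouldBlock.
				Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {},
				// Likely closed; return what we managed and let the read side clean up.
				Err(_) => return written,
			},
			Poll::Ready(Err(_)) => return written,
			Poll::Pending => return written,
		}
	}
}
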
index 7d698a220676b8dfd7c9a0df41b0b09f0799e2e9..6051f00b90a8327026ff89b2a6a758fd5c6fadc4 100644 (file)
@@ -42,6 +42,7 @@ use crate::ln::channelmanager::ChannelDetails;
 
 use crate::prelude::*;
 use crate::sync::{RwLock, RwLockReadGuard, Mutex, MutexGuard};
+use core::iter::FromIterator;
 use core::ops::Deref;
 use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
 use bitcoin::secp256k1::PublicKey;
@@ -285,7 +286,22 @@ where C::Target: chain::Filter,
        where
                FN: Fn(&ChannelMonitor<ChannelSigner>, &TransactionData) -> Vec<TransactionOutputs>
        {
+               let funding_outpoints: HashSet<OutPoint> = HashSet::from_iter(self.monitors.read().unwrap().keys().cloned());
+               for funding_outpoint in funding_outpoints.iter() {
+                       let monitor_lock = self.monitors.read().unwrap();
+                       if let Some(monitor_state) = monitor_lock.get(funding_outpoint) {
+                               self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state);
+                       }
+               }
+
+               // do some followup cleanup if any funding outpoints were added in between iterations
                let monitor_states = self.monitors.write().unwrap();
+               for (funding_outpoint, monitor_state) in monitor_states.iter() {
+                       if !funding_outpoints.contains(funding_outpoint) {
+                               self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state);
+                       }
+               }
+
                if let Some(height) = best_height {
                        // If the best block height is being updated, update highest_chain_height under the
                        // monitors write lock.
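
The chain-sync path above now snapshots the funding outpoints first, visits each monitor under a freshly taken read lock rather than holding the write lock across the whole pass, and only takes the write lock afterwards to catch outpoints added mid-iteration. A generic standalone sketch of that locking pattern (illustrative types, not LDK code):

use std::collections::{HashMap, HashSet};
use std::sync::RwLock;

// Process every entry without holding the map lock across the whole iteration:
// snapshot the keys, visit each under a short-lived read lock, then sweep up any
// entries inserted in between under the write lock.
fn process_all(map: &RwLock<HashMap<u32, String>>, mut process: impl FnMut(u32, &str)) {
	let snapshot: HashSet<u32> = map.read().unwrap().keys().cloned().collect();
	for key in snapshot.iter() {
		let guard = map.read().unwrap();
		if let Some(value) = guard.get(key) {
			process(*key, value);
		}
		// The read lock is dropped here, so writers can insert entries between visits.
	}
	let guard = map.write().unwrap();
	for (key, value) in guard.iter() {
		if !snapshot.contains(key) {
			process(*key, value);
		}
	}
}
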
@@ -295,55 +311,55 @@ where C::Target: chain::Filter,
                                self.highest_chain_height.store(new_height, Ordering::Release);
                        }
                }
+       }
 
-               for (funding_outpoint, monitor_state) in monitor_states.iter() {
-                       let monitor = &monitor_state.monitor;
-                       let mut txn_outputs;
-                       {
-                               txn_outputs = process(monitor, txdata);
-                               let update_id = MonitorUpdateId {
-                                       contents: UpdateOrigin::ChainSync(self.sync_persistence_id.get_increment()),
-                               };
-                               let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
-                               if let Some(height) = best_height {
-                                       if !monitor_state.has_pending_chainsync_updates(&pending_monitor_updates) {
-                                               // If there are not ChainSync persists awaiting completion, go ahead and
-                                               // set last_chain_persist_height here - we wouldn't want the first
-                                               // InProgress to always immediately be considered "overly delayed".
-                                               monitor_state.last_chain_persist_height.store(height as usize, Ordering::Release);
-                                       }
+       fn update_monitor_with_chain_data<FN>(&self, header: &BlockHeader, best_height: Option<u32>, txdata: &TransactionData, process: FN, funding_outpoint: &OutPoint, monitor_state: &MonitorHolder<ChannelSigner>) where FN: Fn(&ChannelMonitor<ChannelSigner>, &TransactionData) -> Vec<TransactionOutputs> {
+               let monitor = &monitor_state.monitor;
+               let mut txn_outputs;
+               {
+                       txn_outputs = process(monitor, txdata);
+                       let update_id = MonitorUpdateId {
+                               contents: UpdateOrigin::ChainSync(self.sync_persistence_id.get_increment()),
+                       };
+                       let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
+                       if let Some(height) = best_height {
+                               if !monitor_state.has_pending_chainsync_updates(&pending_monitor_updates) {
+                                       // If there are not ChainSync persists awaiting completion, go ahead and
+                                       // set last_chain_persist_height here - we wouldn't want the first
+                                       // InProgress to always immediately be considered "overly delayed".
+                                       monitor_state.last_chain_persist_height.store(height as usize, Ordering::Release);
                                }
+                       }
 
-                               log_trace!(self.logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor));
-                               match self.persister.update_persisted_channel(*funding_outpoint, None, monitor, update_id) {
-                                       ChannelMonitorUpdateStatus::Completed =>
-                                               log_trace!(self.logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)),
-                                       ChannelMonitorUpdateStatus::PermanentFailure => {
-                                               monitor_state.channel_perm_failed.store(true, Ordering::Release);
-                                               self.pending_monitor_events.lock().unwrap().push((*funding_outpoint, vec![MonitorEvent::UpdateFailed(*funding_outpoint)], monitor.get_counterparty_node_id()));
-                                               self.event_notifier.notify();
-                                       },
-                                       ChannelMonitorUpdateStatus::InProgress => {
-                                               log_debug!(self.logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor));
-                                               pending_monitor_updates.push(update_id);
-                                       },
+                       log_trace!(self.logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor));
+                       match self.persister.update_persisted_channel(*funding_outpoint, None, monitor, update_id) {
+                               ChannelMonitorUpdateStatus::Completed =>
+                                       log_trace!(self.logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)),
+                               ChannelMonitorUpdateStatus::PermanentFailure => {
+                                       monitor_state.channel_perm_failed.store(true, Ordering::Release);
+                                       self.pending_monitor_events.lock().unwrap().push((*funding_outpoint, vec![MonitorEvent::UpdateFailed(*funding_outpoint)], monitor.get_counterparty_node_id()));
+                                       self.event_notifier.notify();
+                               }
+                               ChannelMonitorUpdateStatus::InProgress => {
+                                       log_debug!(self.logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor));
+                                       pending_monitor_updates.push(update_id);
                                }
                        }
+               }
 
-                       // Register any new outputs with the chain source for filtering, storing any dependent
-                       // transactions from within the block that previously had not been included in txdata.
-                       if let Some(ref chain_source) = self.chain_source {
-                               let block_hash = header.block_hash();
-                               for (txid, mut outputs) in txn_outputs.drain(..) {
-                                       for (idx, output) in outputs.drain(..) {
-                                               // Register any new outputs with the chain source for filtering
-                                               let output = WatchedOutput {
-                                                       block_hash: Some(block_hash),
-                                                       outpoint: OutPoint { txid, index: idx as u16 },
-                                                       script_pubkey: output.script_pubkey,
-                                               };
-                                               chain_source.register_output(output)
-                                       }
+               // Register any new outputs with the chain source for filtering, storing any dependent
+               // transactions from within the block that previously had not been included in txdata.
+               if let Some(ref chain_source) = self.chain_source {
+                       let block_hash = header.block_hash();
+                       for (txid, mut outputs) in txn_outputs.drain(..) {
+                               for (idx, output) in outputs.drain(..) {
+                                       // Register any new outputs with the chain source for filtering
+                                       let output = WatchedOutput {
+                                               block_hash: Some(block_hash),
+                                               outpoint: OutPoint { txid, index: idx as u16 },
+                                               script_pubkey: output.script_pubkey,
+                                       };
+                                       chain_source.register_output(output)
                                }
                        }
                }
@@ -976,7 +992,7 @@ mod tests {
                        assert!(err.contains("ChannelMonitor storage failure")));
                check_added_monitors!(nodes[0], 2); // After the failure we generate a close-channel monitor update
                check_closed_broadcast!(nodes[0], true);
-               check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() }, 
+               check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() },
                        [nodes[1].node.get_our_node_id()], 100000);
 
                // However, as the ChainMonitor is still waiting for the original persistence to complete,
index da3970da8eb887f6e33d3e9981eacdb77609e8f9..47f5605edbb3cbeaa45ff3dfed8609baa6a422fa 100644 (file)
@@ -2642,7 +2642,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                                                }
                                        } else if !self.holder_tx_signed {
                                                log_error!(logger, "WARNING: You have a potentially-unsafe holder commitment transaction available to broadcast");
-                                               log_error!(logger, "    in channel monitor for channel {}!", log_bytes!(self.funding_info.0.to_channel_id()));
+                                               log_error!(logger, "    in channel monitor for channel {}!", &self.funding_info.0.to_channel_id());
                                                log_error!(logger, "    Read the docs for ChannelMonitor::get_latest_holder_commitment_txn and take manual action!");
                                        } else {
                                                // If we generated a MonitorEvent::CommitmentTxConfirmed, the ChannelManager
@@ -3389,7 +3389,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                                if prevout.txid == self.funding_info.0.txid && prevout.vout == self.funding_info.0.index as u32 {
                                        let mut balance_spendable_csv = None;
                                        log_info!(logger, "Channel {} closed by funding output spend in txid {}.",
-                                               log_bytes!(self.funding_info.0.to_channel_id()), txid);
+                                               &self.funding_info.0.to_channel_id(), txid);
                                        self.funding_spend_seen = true;
                                        let mut commitment_tx_to_counterparty_output = None;
                                        if (tx.input[0].sequence.0 >> 8*3) as u8 == 0x80 && (tx.lock_time.0 >> 8*3) as u8 == 0x20 {
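
The sequence/lock-time check above is the BOLT 3 commitment-transaction marker: the upper byte of nSequence is 0x80 and the upper byte of nLockTime is 0x20, with the obscured commitment number split across their lower 24 bits. A small sketch of how those bits recombine (illustrative helper, before XORing with the per-channel obscuring factor):

// Recombine the obscured commitment number from a commitment transaction's nSequence
// (upper 24 bits of the number) and nLockTime (lower 24 bits), per BOLT 3.
fn obscured_commitment_number(sequence: u32, lock_time: u32) -> u64 {
	(((sequence & 0x00ff_ffff) as u64) << 24) | ((lock_time & 0x00ff_ffff) as u64)
}

fn main() {
	// 0x80 / 0x20 upper bytes mark a commitment transaction; the lower bits carry the number.
	let sequence = 0x8000_0000u32 | 42;
	let lock_time = 0x2000_0000u32 | 7;
	assert_eq!(obscured_commitment_number(sequence, lock_time), (42u64 << 24) | 7);
	println!("obscured commitment number: {}", obscured_commitment_number(sequence, lock_time));
}
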
index ce449a4102c31498232874498c3e6cced2da8c20..22e7ec2c2f9eb90fcfaa8f5b754c27540aa854e8 100644 (file)
@@ -9,7 +9,9 @@
 
 //! Types describing on-chain transactions.
 
+use crate::ln::ChannelId;
 use bitcoin::hash_types::Txid;
+use bitcoin::hashes::Hash;
 use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
 use bitcoin::blockdata::transaction::Transaction;
 
@@ -57,12 +59,8 @@ pub struct OutPoint {
 
 impl OutPoint {
        /// Convert an `OutPoint` to a lightning channel id.
-       pub fn to_channel_id(&self) -> [u8; 32] {
-               let mut res = [0; 32];
-               res[..].copy_from_slice(&self.txid[..]);
-               res[30] ^= ((self.index >> 8) & 0xff) as u8;
-               res[31] ^= ((self.index >> 0) & 0xff) as u8;
-               res
+       pub fn to_channel_id(&self) -> ChannelId {
+               ChannelId::v1_from_funding_txid(&self.txid.as_inner(), self.index)
        }
 
        /// Converts this OutPoint into the OutPoint field as used by rust-bitcoin
@@ -94,10 +92,10 @@ mod tests {
                assert_eq!(&OutPoint {
                        txid: tx.txid(),
                        index: 0
-               }.to_channel_id(), &hex::decode("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25e").unwrap()[..]);
+               }.to_channel_id().0[..], &hex::decode("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25e").unwrap()[..]);
                assert_eq!(&OutPoint {
                        txid: tx.txid(),
                        index: 1
-               }.to_channel_id(), &hex::decode("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25f").unwrap()[..]);
+               }.to_channel_id().0[..], &hex::decode("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25f").unwrap()[..]);
        }
 }
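
The inline computation removed above, which this change moves behind ChannelId::v1_from_funding_txid, takes the 32 funding-txid bytes and XORs the funding output index into the last two bytes; that is why the two test vectors differ only in their final byte. A standalone sketch of the same derivation (illustrative function name):

// v1 channel id: the funding txid bytes with the funding output index XORed into the
// last two bytes (high byte into byte 30, low byte into byte 31).
fn v1_channel_id(txid_bytes: &[u8; 32], funding_output_index: u16) -> [u8; 32] {
	let mut res = [0u8; 32];
	res.copy_from_slice(txid_bytes);
	res[30] ^= ((funding_output_index >> 8) & 0xff) as u8;
	res[31] ^= (funding_output_index & 0xff) as u8;
	res
}

fn main() {
	let txid_bytes = [0x5e; 32];
	assert_eq!(v1_channel_id(&txid_bytes, 0)[31], 0x5e);
	assert_eq!(v1_channel_id(&txid_bytes, 1)[31], 0x5e ^ 0x01);
	println!("{:02x?}", v1_channel_id(&txid_bytes, 1));
}
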
index 36c0f20d2aed5d99f8183240d3332d67e0cd0130..95f2eb357dcd57a395227b828704ad9587c7dac5 100644 (file)
@@ -23,7 +23,7 @@ use crate::ln::channelmanager::{InterceptId, PaymentId, RecipientOnionFields};
 use crate::ln::channel::FUNDING_CONF_DEADLINE_BLOCKS;
 use crate::ln::features::ChannelTypeFeatures;
 use crate::ln::msgs;
-use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
+use crate::ln::{ChannelId, PaymentPreimage, PaymentHash, PaymentSecret};
 use crate::routing::gossip::NetworkUpdate;
 use crate::util::errors::APIError;
 use crate::util::ser::{BigSize, FixedLengthReader, Writeable, Writer, MaybeReadable, Readable, RequiredWrapper, UpgradableRequired, WithoutLength};
@@ -83,7 +83,7 @@ impl_writeable_tlv_based_enum!(PaymentPurpose,
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct ClaimedHTLC {
        /// The `channel_id` of the channel over which the HTLC was received.
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The `user_channel_id` of the channel over which the HTLC was received. This is the value
        /// passed in to [`ChannelManager::create_channel`] for outbound channels, or to
        /// [`ChannelManager::accept_inbound_channel`] for inbound channels if
@@ -246,7 +246,7 @@ pub enum HTLCDestination {
                /// counterparty node information.
                node_id: Option<PublicKey>,
                /// The outgoing `channel_id` between us and the next node.
-               channel_id: [u8; 32],
+               channel_id: ChannelId,
        },
        /// Scenario where we are unsure of the next node to forward the HTLC to.
        UnknownNextHop {
@@ -364,7 +364,7 @@ pub enum Event {
                /// [`ChannelManager::funding_transaction_generated`].
                ///
                /// [`ChannelManager::funding_transaction_generated`]: crate::ln::channelmanager::ChannelManager::funding_transaction_generated
-               temporary_channel_id: [u8; 32],
+               temporary_channel_id: ChannelId,
                /// The counterparty's node_id, which you'll need to pass back into
                /// [`ChannelManager::funding_transaction_generated`].
                ///
@@ -458,7 +458,7 @@ pub enum Event {
                /// payment is to pay an invoice or to send a spontaneous payment.
                purpose: PaymentPurpose,
                /// The `channel_id` indicating over which channel we received the payment.
-               via_channel_id: Option<[u8; 32]>,
+               via_channel_id: Option<ChannelId>,
                /// The `user_channel_id` indicating over which channel we received the payment.
                via_user_channel_id: Option<u128>,
                /// The block height at which this payment will be failed back and will no longer be
@@ -718,17 +718,17 @@ pub enum Event {
                /// The `channel_id` indicating which channel the spendable outputs belong to.
                ///
                /// This will always be `Some` for events generated by LDK versions 0.0.117 and above.
-               channel_id: Option<[u8; 32]>,
+               channel_id: Option<ChannelId>,
        },
        /// This event is generated when a payment has been successfully forwarded through us and a
        /// forwarding fee earned.
        PaymentForwarded {
                /// The incoming channel between the previous node and us. This is only `None` for events
                /// generated or serialized by versions prior to 0.0.107.
-               prev_channel_id: Option<[u8; 32]>,
+               prev_channel_id: Option<ChannelId>,
                /// The outgoing channel between the next node and us. This is only `None` for events
                /// generated or serialized by versions prior to 0.0.107.
-               next_channel_id: Option<[u8; 32]>,
+               next_channel_id: Option<ChannelId>,
                /// The fee, in milli-satoshis, which was earned as a result of the payment.
                ///
                /// Note that if we force-closed the channel over which we forwarded an HTLC while the HTLC
@@ -759,7 +759,7 @@ pub enum Event {
        /// [`Event::ChannelReady`] event.
        ChannelPending {
                /// The `channel_id` of the channel that is pending confirmation.
-               channel_id: [u8; 32],
+               channel_id: ChannelId,
                /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound
                /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if
                /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise
@@ -772,7 +772,7 @@ pub enum Event {
                /// The `temporary_channel_id` this channel used to be known by during channel establishment.
                ///
                /// Will be `None` for channels created prior to LDK version 0.0.115.
-               former_temporary_channel_id: Option<[u8; 32]>,
+               former_temporary_channel_id: Option<ChannelId>,
                /// The `node_id` of the channel counterparty.
                counterparty_node_id: PublicKey,
                /// The outpoint of the channel's funding transaction.
@@ -784,7 +784,7 @@ pub enum Event {
        /// establishment.
        ChannelReady {
                /// The `channel_id` of the channel that is ready.
-               channel_id: [u8; 32],
+               channel_id: ChannelId,
                /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound
                /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if
                /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise
@@ -811,7 +811,7 @@ pub enum Event {
        ChannelClosed  {
                /// The `channel_id` of the channel which has been closed. Note that on-chain transactions
                /// resolving the channel are likely still awaiting confirmation.
-               channel_id: [u8; 32],
+               channel_id: ChannelId,
                /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound
                /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if
                /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise
@@ -838,7 +838,7 @@ pub enum Event {
        /// inputs for another purpose.
        DiscardFunding {
                /// The channel_id of the channel which has been closed.
-               channel_id: [u8; 32],
+               channel_id: ChannelId,
                /// The full transaction received from the user
                transaction: Transaction
        },
@@ -863,7 +863,7 @@ pub enum Event {
                ///
                /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
                /// [`ChannelManager::force_close_without_broadcasting_txn`]: crate::ln::channelmanager::ChannelManager::force_close_without_broadcasting_txn
-               temporary_channel_id: [u8; 32],
+               temporary_channel_id: ChannelId,
                /// The node_id of the counterparty requesting to open the channel.
                ///
                /// When responding to the request, the `counterparty_node_id` should be passed
@@ -909,7 +909,7 @@ pub enum Event {
        /// requirements (i.e. insufficient fees paid, or a CLTV that is too soon).
        HTLCHandlingFailed {
                /// The channel over which the HTLC was received.
-               prev_channel_id: [u8; 32],
+               prev_channel_id: ChannelId,
                /// Destination of the HTLC that failed to be processed.
                failed_next_destination: HTLCDestination,
        },
@@ -1279,7 +1279,7 @@ impl MaybeReadable for Event {
                        5u8 => {
                                let f = || {
                                        let mut outputs = WithoutLength(Vec::new());
-                                       let mut channel_id: Option<[u8; 32]> = None;
+                                       let mut channel_id: Option<ChannelId> = None;
                                        read_tlv_fields!(reader, {
                                                (0, outputs, required),
                                                (1, channel_id, option),
@@ -1335,7 +1335,7 @@ impl MaybeReadable for Event {
                        },
                        9u8 => {
                                let f = || {
-                                       let mut channel_id = [0; 32];
+                                       let mut channel_id = ChannelId::new_zero();
                                        let mut reason = UpgradableRequired(None);
                                        let mut user_channel_id_low_opt: Option<u64> = None;
                                        let mut user_channel_id_high_opt: Option<u64> = None;
@@ -1363,7 +1363,7 @@ impl MaybeReadable for Event {
                        },
                        11u8 => {
                                let f = || {
-                                       let mut channel_id = [0; 32];
+                                       let mut channel_id = ChannelId::new_zero();
                                        let mut transaction = Transaction{ version: 2, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: Vec::new() };
                                        read_tlv_fields!(reader, {
                                                (0, channel_id, required),
@@ -1474,7 +1474,7 @@ impl MaybeReadable for Event {
                        },
                        25u8 => {
                                let f = || {
-                                       let mut prev_channel_id = [0; 32];
+                                       let mut prev_channel_id = ChannelId::new_zero();
                                        let mut failed_next_destination_opt = UpgradableRequired(None);
                                        read_tlv_fields!(reader, {
                                                (0, prev_channel_id, required),
@@ -1490,7 +1490,7 @@ impl MaybeReadable for Event {
                        27u8 => Ok(None),
                        29u8 => {
                                let f = || {
-                                       let mut channel_id = [0; 32];
+                                       let mut channel_id = ChannelId::new_zero();
                                        let mut user_channel_id: u128 = 0;
                                        let mut counterparty_node_id = RequiredWrapper(None);
                                        let mut channel_type = RequiredWrapper(None);
@@ -1512,7 +1512,7 @@ impl MaybeReadable for Event {
                        },
                        31u8 => {
                                let f = || {
-                                       let mut channel_id = [0; 32];
+                                       let mut channel_id = ChannelId::new_zero();
                                        let mut user_channel_id: u128 = 0;
                                        let mut former_temporary_channel_id = None;
                                        let mut counterparty_node_id = RequiredWrapper(None);
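
Downstream event handlers now see these fields as the ChannelId type rather than a raw [u8; 32]. A sketch of consuming such an event (assuming, as the test and logging changes in this diff suggest, that ChannelId exposes its bytes as a public .0 field and implements Display):

use lightning::events::Event;

// Handle a channel-close event whose channel_id is now a ChannelId newtype.
fn handle_event(event: Event) {
	match event {
		Event::ChannelClosed { channel_id, reason, .. } => {
			// Assumes ChannelId implements Display, so it can be logged directly as hex.
			println!("channel {} closed: {:?}", channel_id, reason);
			// Assumes the underlying bytes stay reachable when a raw array is needed.
			let _raw: [u8; 32] = channel_id.0;
		},
		_ => {},
	}
}
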
index 8e52af093d6c59e42a05d5a8e11d0307fcd8c367..a9a5790b06c428cd09ff74eecedc743c65949a08 100644 (file)
@@ -23,7 +23,7 @@ use crate::ln::channelmanager::{RAACommitmentOrder, PaymentSendFailure, PaymentI
 use crate::ln::channel::AnnouncementSigsState;
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
-use crate::util::enforcing_trait_impls::EnforcingSigner;
+use crate::util::test_channel_signer::TestChannelSigner;
 use crate::util::errors::APIError;
 use crate::util::ser::{ReadableArgs, Writeable};
 use crate::util::test_utils::TestBroadcaster;
@@ -111,7 +111,7 @@ fn test_monitor_and_persister_update_fail() {
        let chain_mon = {
                let new_monitor = {
                        let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
-                       let new_monitor = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
+                       let new_monitor = <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(
                                &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
                        assert!(new_monitor == *monitor);
                        new_monitor
index a8e8c8053b6b5d627ad02776665cde72cf2cb3ec..da1364021cefe99e9f1b63e2ecd215d8f67b438e 100644 (file)
@@ -22,7 +22,7 @@ use bitcoin::secp256k1::{PublicKey,SecretKey};
 use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
 use bitcoin::secp256k1;
 
-use crate::ln::{PaymentPreimage, PaymentHash};
+use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
 use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
 use crate::ln::msgs;
 use crate::ln::msgs::DecodeError;
@@ -532,7 +532,7 @@ pub(super) struct ReestablishResponses {
 /// channel's counterparty_node_id and channel_id).
 pub(crate) type ShutdownResult = (
        Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
-       Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>
+       Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>
 );
 
 /// If the majority of the channel's funds are to the fundee and the initiator holds only just
@@ -637,8 +637,11 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
 
        user_id: u128,
 
-       channel_id: [u8; 32],
-       temporary_channel_id: Option<[u8; 32]>, // Will be `None` for channels created prior to 0.0.115.
+       /// The current channel ID.
+       channel_id: ChannelId,
+       /// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
+       /// Will be `None` for channels created prior to 0.0.115.
+       temporary_channel_id: Option<ChannelId>,
        channel_state: u32,
 
        // When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
@@ -957,14 +960,14 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
 
        // Public utilities:
 
-       pub fn channel_id(&self) -> [u8; 32] {
+       pub fn channel_id(&self) -> ChannelId {
                self.channel_id
        }
 
        // Return the `temporary_channel_id` used during channel establishment.
        //
        // Will return `None` for channels created prior to LDK version 0.0.115.
-       pub fn temporary_channel_id(&self) -> Option<[u8; 32]> {
+       pub fn temporary_channel_id(&self) -> Option<ChannelId> {
                self.temporary_channel_id
        }
 
@@ -1232,7 +1235,8 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
                        commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
                        get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
-                       log_bytes!(self.channel_id), if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
+                       &self.channel_id,
+                       if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
 
                macro_rules! get_htlc_in_commitment {
                        ($htlc: expr, $offered: expr) => {
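
These log call sites can drop log_bytes!() because the channel id value itself now formats as hex via `{}`. A minimal standalone sketch of the kind of newtype-plus-Display impl that makes this work on a 32-byte id (hypothetical, not the actual channel_id.rs contents):

use core::fmt;

// A 32-byte identifier that prints as lowercase hex, so `{}` in log macros just works.
pub struct DemoId(pub [u8; 32]);

impl fmt::Display for DemoId {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		for byte in self.0.iter() {
			write!(f, "{:02x}", byte)?;
		}
		Ok(())
	}
}

fn main() {
	let id = DemoId([0xab; 32]);
	println!("{}", id); // 64 hex characters
}
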
@@ -2206,8 +2210,6 @@ impl<SP: Deref> Channel<SP> where
                }
                assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
 
-               let payment_hash_calc = PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).into_inner());
-
                // ChannelManager may generate duplicate claims/fails due to HTLC update events from
                // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
                // these, but for now we just have to treat them as normal.
@@ -2216,13 +2218,15 @@ impl<SP: Deref> Channel<SP> where
                let mut htlc_value_msat = 0;
                for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
                        if htlc.htlc_id == htlc_id_arg {
-                               assert_eq!(htlc.payment_hash, payment_hash_calc);
+                               debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).into_inner()));
+                               log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
+                                       htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
                                match htlc.state {
                                        InboundHTLCState::Committed => {},
                                        InboundHTLCState::LocalRemoved(ref reason) => {
                                                if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
                                                } else {
-                                                       log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, log_bytes!(self.context.channel_id()));
+                                                       log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
                                                        debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
                                                }
                                                return UpdateFulfillFetch::DuplicateClaim {};
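
The debug_assert_eq! above encodes the basic HTLC invariant being relied on here: the payment hash is the SHA-256 of the preimage being claimed. A standalone sketch of that check using the bitcoin crate's hash types, as the surrounding code does (illustrative helper):

use bitcoin::hashes::{sha256::Hash as Sha256, Hash};

// An HTLC preimage is valid for a payment hash iff SHA256(preimage) == payment_hash.
fn preimage_matches(preimage: &[u8; 32], payment_hash: &[u8; 32]) -> bool {
	Sha256::hash(&preimage[..]).into_inner() == *payment_hash
}

fn main() {
	let preimage = [7u8; 32];
	let payment_hash = Sha256::hash(&preimage[..]).into_inner();
	assert!(preimage_matches(&preimage, &payment_hash));
	println!("preimage matches payment hash");
}
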
@@ -2275,7 +2279,7 @@ impl<SP: Deref> Channel<SP> where
                                        },
                                        &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
                                                if htlc_id_arg == htlc_id {
-                                                       log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", log_bytes!(self.context.channel_id()));
+                                                       log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
                                                        // TODO: We may actually be able to switch to a fulfill here, though its
                                                        // rare enough it may not be worth the complexity burden.
                                                        debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
@@ -2285,7 +2289,7 @@ impl<SP: Deref> Channel<SP> where
                                        _ => {}
                                }
                        }
-                       log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", log_bytes!(self.context.channel_id()), self.context.channel_state);
+                       log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state);
                        self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
                                payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
                        });
@@ -2303,7 +2307,7 @@ impl<SP: Deref> Channel<SP> where
                                debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
                                return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
                        }
-                       log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, log_bytes!(self.context.channel_id));
+                       log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
                        htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
                }
 
@@ -2442,7 +2446,7 @@ impl<SP: Deref> Channel<SP> where
                                        _ => {}
                                }
                        }
-                       log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, log_bytes!(self.context.channel_id()));
+                       log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
                        self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
                                htlc_id: htlc_id_arg,
                                err_packet,
@@ -2450,7 +2454,7 @@ impl<SP: Deref> Channel<SP> where
                        return Ok(None);
                }
 
-               log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, log_bytes!(self.context.channel_id()));
+               log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, &self.context.channel_id());
                {
                        let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
                        htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
@@ -2493,7 +2497,7 @@ impl<SP: Deref> Channel<SP> where
                let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
 
                log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
-                       log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
+                       &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
 
                let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
                let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
@@ -2547,7 +2551,7 @@ impl<SP: Deref> Channel<SP> where
                self.context.cur_holder_commitment_transaction_number -= 1;
                self.context.cur_counterparty_commitment_transaction_number -= 1;
 
-               log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.context.channel_id()));
+               log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
 
                let need_channel_ready = self.check_get_channel_ready(0).is_some();
                self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
@@ -2621,7 +2625,7 @@ impl<SP: Deref> Channel<SP> where
                self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
                self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
 
-               log_info!(logger, "Received channel_ready from peer for channel {}", log_bytes!(self.context.channel_id()));
+               log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
 
                Ok(self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger))
        }
@@ -2749,7 +2753,7 @@ impl<SP: Deref> Channel<SP> where
                        if pending_remote_value_msat - msg.amount_msat - self.context.holder_selected_channel_reserve_satoshis * 1000 < remote_fee_cost_incl_stuck_buffer_msat {
                                // Note that if the pending_forward_status is not updated here, then it's because we're already failing
                                // the HTLC, i.e. its status is already set to failing.
-                               log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", log_bytes!(self.context.channel_id()));
+                               log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
                                pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
                        }
                } else {
@@ -2877,7 +2881,7 @@ impl<SP: Deref> Channel<SP> where
                        log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
                                log_bytes!(msg.signature.serialize_compact()[..]),
                                log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
-                               log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id()));
+                               log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
                        if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
                                return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
                        }
@@ -2947,7 +2951,7 @@ impl<SP: Deref> Channel<SP> where
                                let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
                                log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
                                        log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()),
-                                       encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.context.channel_id()));
+                                       encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
                                if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
                                        return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
                                }
@@ -2991,7 +2995,7 @@ impl<SP: Deref> Channel<SP> where
                        } else { None };
                        if let Some(forward_info) = new_forward {
                                log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
-                                       &htlc.payment_hash, log_bytes!(self.context.channel_id));
+                                       &htlc.payment_hash, &self.context.channel_id);
                                htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
                                need_commitment = true;
                        }
@@ -3000,7 +3004,7 @@ impl<SP: Deref> Channel<SP> where
                for htlc in self.context.pending_outbound_htlcs.iter_mut() {
                        if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
                                log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
-                                       &htlc.payment_hash, log_bytes!(self.context.channel_id));
+                                       &htlc.payment_hash, &self.context.channel_id);
                                // Grab the preimage, if it exists, instead of cloning
                                let mut reason = OutboundHTLCOutcome::Success(None);
                                mem::swap(outcome, &mut reason);
@@ -3050,7 +3054,7 @@ impl<SP: Deref> Channel<SP> where
                                monitor_update.updates.append(&mut additional_update.updates);
                        }
                        log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
-                               log_bytes!(self.context.channel_id));
+                               &self.context.channel_id);
                        return Ok(self.push_ret_blockable_mon_update(monitor_update));
                }
 
@@ -3067,7 +3071,7 @@ impl<SP: Deref> Channel<SP> where
                } else { false };
 
                log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
-                       log_bytes!(self.context.channel_id()), if need_commitment_signed { " our own commitment_signed and" } else { "" });
+                       &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
                self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
                return Ok(self.push_ret_blockable_mon_update(monitor_update));
        }
@@ -3096,7 +3100,7 @@ impl<SP: Deref> Channel<SP> where
                assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
                if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
                        log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
-                               if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, log_bytes!(self.context.channel_id()));
+                               if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
 
                        let mut monitor_update = ChannelMonitorUpdate {
                                update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
@@ -3127,8 +3131,7 @@ impl<SP: Deref> Channel<SP> where
                                                        Err(e) => {
                                                                match e {
                                                                        ChannelError::Ignore(ref msg) => {
-                                                                               log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}",
-                                                                                       &payment_hash, msg, log_bytes!(self.context.channel_id()));
+                                                                               log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
                                                                                // If we fail to send here, then this HTLC should
                                                                                // be failed backwards. Failing to send here
                                                                                // indicates that this HTLC may keep being put back
@@ -3194,7 +3197,7 @@ impl<SP: Deref> Channel<SP> where
                        monitor_update.updates.append(&mut additional_update.updates);
 
                        log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
-                               log_bytes!(self.context.channel_id()), if update_fee.is_some() { "a fee update, " } else { "" },
+                               &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
                                update_add_count, update_fulfill_count, update_fail_count);
 
                        self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
@@ -3283,7 +3286,7 @@ impl<SP: Deref> Channel<SP> where
                        self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
                }
 
-               log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", log_bytes!(self.context.channel_id()));
+               log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
                let mut to_forward_infos = Vec::new();
                let mut revoked_htlcs = Vec::new();
                let mut finalized_claimed_htlcs = Vec::new();
@@ -3428,7 +3431,7 @@ impl<SP: Deref> Channel<SP> where
                        self.context.monitor_pending_forwards.append(&mut to_forward_infos);
                        self.context.monitor_pending_failures.append(&mut revoked_htlcs);
                        self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
-                       log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.context.channel_id()));
+                       log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
                        return_with_htlcs_to_fail!(Vec::new());
                }
 
@@ -3440,7 +3443,7 @@ impl<SP: Deref> Channel<SP> where
                                monitor_update.updates.append(&mut additional_update.updates);
 
                                log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
-                                       log_bytes!(self.context.channel_id()), release_state_str);
+                                       &self.context.channel_id(), release_state_str);
 
                                self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
                                return_with_htlcs_to_fail!(htlcs_to_fail);
@@ -3455,7 +3458,7 @@ impl<SP: Deref> Channel<SP> where
                                        monitor_update.updates.append(&mut additional_update.updates);
 
                                        log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
-                                               log_bytes!(self.context.channel_id()),
+                                               &self.context.channel_id(),
                                                update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
                                                release_state_str);
 
@@ -3463,7 +3466,7 @@ impl<SP: Deref> Channel<SP> where
                                        return_with_htlcs_to_fail!(htlcs_to_fail);
                                } else {
                                        log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
-                                               log_bytes!(self.context.channel_id()), release_state_str);
+                                               &self.context.channel_id(), release_state_str);
 
                                        self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
                                        return_with_htlcs_to_fail!(htlcs_to_fail);
@@ -3625,7 +3628,7 @@ impl<SP: Deref> Channel<SP> where
                self.context.sent_message_awaiting_response = None;
 
                self.context.channel_state |= ChannelState::PeerDisconnected as u32;
-               log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.context.channel_id()));
+               log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
        }
 
        /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
@@ -3728,7 +3731,7 @@ impl<SP: Deref> Channel<SP> where
                self.context.monitor_pending_commitment_signed = false;
                let order = self.context.resend_order.clone();
                log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
-                       log_bytes!(self.context.channel_id()), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
+                       &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
                        if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
                        match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
                MonitorRestoreUpdates {
@@ -3840,7 +3843,7 @@ impl<SP: Deref> Channel<SP> where
                } else { None };
 
                log_trace!(logger, "Regenerated latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
-                               log_bytes!(self.context.channel_id()), if update_fee.is_some() { " update_fee," } else { "" },
+                               &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
                                update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
                msgs::CommitmentUpdate {
                        update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
@@ -3896,8 +3899,8 @@ impl<SP: Deref> Channel<SP> where
                        if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
                                macro_rules! log_and_panic {
                                        ($err_msg: expr) => {
-                                               log_error!(logger, $err_msg, log_bytes!(self.context.channel_id), log_pubkey!(self.context.counterparty_node_id));
-                                               panic!($err_msg, log_bytes!(self.context.channel_id), log_pubkey!(self.context.counterparty_node_id));
+                                               log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
+                                               panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
                                        }
                                }
                                log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
@@ -3996,9 +3999,9 @@ impl<SP: Deref> Channel<SP> where
 
                if msg.next_local_commitment_number == next_counterparty_commitment_number {
                        if required_revoke.is_some() {
-                               log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", log_bytes!(self.context.channel_id()));
+                               log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
                        } else {
-                               log_debug!(logger, "Reconnected channel {} with no loss", log_bytes!(self.context.channel_id()));
+                               log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
                        }
 
                        Ok(ReestablishResponses {
@@ -4009,9 +4012,9 @@ impl<SP: Deref> Channel<SP> where
                        })
                } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
                        if required_revoke.is_some() {
-                               log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", log_bytes!(self.context.channel_id()));
+                               log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
                        } else {
-                               log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", log_bytes!(self.context.channel_id()));
+                               log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
                        }
 
                        if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
@@ -4752,14 +4755,14 @@ impl<SP: Deref> Channel<SP> where
                                        // send it immediately instead of waiting for a best_block_updated call (which
                                        // may have already happened for this block).
                                        if let Some(channel_ready) = self.check_get_channel_ready(height) {
-                                               log_info!(logger, "Sending a channel_ready to our peer for channel {}", log_bytes!(self.context.channel_id));
+                                               log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
                                                let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger);
                                                return Ok((Some(channel_ready), announcement_sigs));
                                        }
                                }
                                for inp in tx.input.iter() {
                                        if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
-                                               log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(self.context.channel_id()));
+                                               log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
                                                return Err(ClosureReason::CommitmentTxConfirmed);
                                        }
                                }
@@ -4821,7 +4824,7 @@ impl<SP: Deref> Channel<SP> where
                        let announcement_sigs = if let Some((genesis_block_hash, node_signer, user_config)) = genesis_node_signer {
                                self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger)
                        } else { None };
-                       log_info!(logger, "Sending a channel_ready to our peer for channel {}", log_bytes!(self.context.channel_id));
+                       log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
                        return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
                }
 
@@ -4852,7 +4855,7 @@ impl<SP: Deref> Channel<SP> where
                        }
                } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
                                height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
-                       log_info!(logger, "Closing channel {} due to funding timeout", log_bytes!(self.context.channel_id));
+                       log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
                        // If funding_tx_confirmed_in is unset, the channel must not be active
                        assert!(non_shutdown_state <= ChannelState::ChannelReady as u32);
                        assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
@@ -4962,7 +4965,7 @@ impl<SP: Deref> Channel<SP> where
                        return None;
                }
 
-               log_trace!(logger, "Creating an announcement_signatures message for channel {}", log_bytes!(self.context.channel_id()));
+               log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
                let announcement = match self.get_channel_announcement(node_signer, genesis_block_hash, user_config) {
                        Ok(a) => a,
                        Err(e) => {
@@ -5097,10 +5100,10 @@ impl<SP: Deref> Channel<SP> where
                let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
                let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
                        let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
-                       log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), log_bytes!(self.context.channel_id()));
+                       log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
                        remote_last_secret
                } else {
-                       log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", log_bytes!(self.context.channel_id()));
+                       log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
                        [0;32]
                };
                self.mark_awaiting_response();
@@ -5216,7 +5219,8 @@ impl<SP: Deref> Channel<SP> where
                }
 
                let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0;
-               log_debug!(logger, "Pushing new outbound HTLC for {} msat {}", amount_msat,
+               log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
+                       payment_hash, amount_msat,
                        if force_holding_cell { "into holding cell" }
                        else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
                        else { "to peer" });
@@ -5380,14 +5384,14 @@ impl<SP: Deref> Channel<SP> where
                                        log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
                                                encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
                                                &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
-                                               log_bytes!(signature.serialize_compact()[..]), log_bytes!(self.context.channel_id()));
+                                               log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
 
                                        for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
                                                log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
                                                        encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
                                                        encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
                                                        log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()),
-                                                       log_bytes!(htlc_sig.serialize_compact()[..]), log_bytes!(self.context.channel_id()));
+                                                       log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
                                        }
                                }
 
@@ -5635,7 +5639,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                        Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
                };
 
-               let temporary_channel_id = entropy_source.get_secure_random_bytes();
+               let temporary_channel_id = ChannelId::temporary_from_entropy_source(entropy_source);
 
                Ok(Self {
                        context: ChannelContext {
@@ -6483,7 +6487,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                        log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
                                log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
                                encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
-                               encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id()));
+                               encode::serialize_hex(&funding_script), &self.context.channel_id());
                        secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
                }
 
@@ -6493,7 +6497,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
                let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
                log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
-                       log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
+                       &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
 
                match &self.context.holder_signer {
                        // TODO (arik): move match into calling method for Taproot
@@ -6587,7 +6591,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                self.context.cur_counterparty_commitment_transaction_number -= 1;
                self.context.cur_holder_commitment_transaction_number -= 1;
 
-               log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.context.channel_id()));
+               log_info!(logger, "Generated funding_signed for peer for channel {}", &self.context.channel_id());
 
                // Promote the channel to a full-fledged one now that we have updated the state and have a
                // `ChannelMonitor`.
@@ -7257,7 +7261,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 
                let mut user_id_high_opt: Option<u64> = None;
                let mut channel_keys_id: Option<[u8; 32]> = None;
-               let mut temporary_channel_id: Option<[u8; 32]> = None;
+               let mut temporary_channel_id: Option<ChannelId> = None;
                let mut holder_max_accepted_htlcs: Option<u16> = None;
 
                let mut blocked_monitor_updates = Some(Vec::new());
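
A minimal sketch (not part of the patch) of why the channel.rs log call sites above can replace `log_bytes!(self.context.channel_id())` with `&self.context.channel_id()`: the new `ChannelId` type, introduced in channel_id.rs below, implements `fmt::Display` and hex-encodes its 32 bytes. The `lightning::ln::ChannelId` import path is an assumption, mirroring the `use crate::ln::{.., ChannelId, ..}` hunk in channelmanager.rs further below.

    use lightning::ln::ChannelId;

    fn describe(channel_id: &ChannelId) -> String {
        // Previously: format!("channel {}", log_bytes!(channel_id[..]))
        // Now the `Display` impl hex-encodes the 32 bytes directly.
        format!("channel {}", channel_id)
    }
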
diff --git a/lightning/src/ln/channel_id.rs b/lightning/src/ln/channel_id.rs
new file mode 100644 (file)
index 0000000..cdbcd30
--- /dev/null
@@ -0,0 +1,143 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! ChannelId definition.
+
+use crate::ln::msgs::DecodeError;
+use crate::sign::EntropySource;
+use crate::util::ser::{Readable, Writeable, Writer};
+
+use bitcoin::hashes::hex::ToHex;
+
+use crate::io;
+use crate::prelude::*;
+use core::fmt;
+use core::ops::Deref;
+
+/// A unique 32-byte identifier for a channel.
+/// Depending on how the ID is generated, two varieties are currently distinguished
+/// (though both are stored as 32 bytes): _v1_ and _temporary_.
+/// A _v1_ channel ID is derived from the funding transaction outpoint (txid and output index).
+/// A _temporary_ ID is generated randomly.
+/// (A later revocation-point-based _v2_ is a possibility.)
+/// The variety (context) is not stored; it is only relevant at creation.
+///
+/// This is not exported to bindings users as we just use [u8; 32] directly.
+#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct ChannelId(pub [u8; 32]);
+
+impl ChannelId {
+       /// Create a _v1_ channel ID based on a funding TX ID and output index
+       pub fn v1_from_funding_txid(txid: &[u8; 32], output_index: u16) -> Self {
+               let mut res = [0; 32];
+               res[..].copy_from_slice(&txid[..]);
+               res[30] ^= ((output_index >> 8) & 0xff) as u8;
+               res[31] ^= ((output_index >> 0) & 0xff) as u8;
+               Self(res)
+       }
+
+       /// Create a _temporary_ channel ID randomly, based on an entropy source.
+       pub fn temporary_from_entropy_source<ES: Deref>(entropy_source: &ES) -> Self
+       where ES::Target: EntropySource {
+               Self(entropy_source.get_secure_random_bytes())
+       }
+
+       /// Generic constructor; create a new channel ID from the provided data.
+       /// Use a more specific `*_from_*` constructor when possible.
+       pub fn from_bytes(data: [u8; 32]) -> Self {
+               Self(data)
+       }
+
+       /// Create a channel ID consisting of all-zeros data (e.g., when uninitialized or as a placeholder).
+       pub fn new_zero() -> Self {
+               Self([0; 32])
+       }
+
+       /// Check whether the ID consists of all zeros (uninitialized)
+       pub fn is_zero(&self) -> bool {
+               self.0[..] == [0; 32]
+       }
+}
+
+impl Writeable for ChannelId {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+               self.0.write(w)
+       }
+}
+
+impl Readable for ChannelId {
+       fn read<R: io::Read>(r: &mut R) -> Result<Self, DecodeError> {
+               let buf: [u8; 32] = Readable::read(r)?;
+               Ok(ChannelId(buf))
+       }
+}
+
+impl ToHex for ChannelId {
+       fn to_hex(&self) -> String {
+               self.0.to_hex()
+       }
+}
+
+impl fmt::Display for ChannelId {
+       fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+               crate::util::logger::DebugBytes(&self.0).fmt(f)
+       }
+}
+
+#[cfg(test)]
+mod tests {
+       use crate::ln::ChannelId;
+       use crate::util::ser::{Readable, Writeable};
+       use crate::util::test_utils;
+       use bitcoin::hashes::hex::ToHex;
+       use crate::prelude::*;
+       use crate::io;
+
+       #[test]
+       fn test_channel_id_v1_from_funding_txid() {
+               let channel_id = ChannelId::v1_from_funding_txid(&[2; 32], 1);
+               assert_eq!(channel_id.to_hex(), "0202020202020202020202020202020202020202020202020202020202020203");
+       }
+
+       #[test]
+       fn test_channel_id_new_from_data() {
+               let data: [u8; 32] = [2; 32];
+               let channel_id = ChannelId::from_bytes(data.clone());
+               assert_eq!(channel_id.0, data);
+       }
+
+       #[test]
+       fn test_channel_id_equals() {
+               let channel_id11 = ChannelId::v1_from_funding_txid(&[2; 32], 2);
+               let channel_id12 = ChannelId::v1_from_funding_txid(&[2; 32], 2);
+               let channel_id21 = ChannelId::v1_from_funding_txid(&[2; 32], 42);
+               assert_eq!(channel_id11, channel_id12);
+               assert_ne!(channel_id11, channel_id21);
+       }
+
+       #[test]
+       fn test_channel_id_write_read() {
+               let data: [u8; 32] = [2; 32];
+               let channel_id = ChannelId::from_bytes(data.clone());
+
+               let mut w = test_utils::TestVecWriter(Vec::new());
+               channel_id.write(&mut w).unwrap();
+
+               let channel_id_2 = ChannelId::read(&mut io::Cursor::new(&w.0)).unwrap();
+               assert_eq!(channel_id_2, channel_id);
+               assert_eq!(channel_id_2.0, data);
+       }
+
+       #[test]
+       fn test_channel_id_display() {
+               let channel_id = ChannelId::v1_from_funding_txid(&[2; 32], 1);
+               assert_eq!(format!("{}", &channel_id), "0202020202020202020202020202020202020202020202020202020202020203");
+       }
+}
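
A short usage sketch of the constructors and `Display` behaviour defined above (the `lightning::ln::ChannelId` import path is an assumption, matching the re-export added in channelmanager.rs below). With txid = [2; 32] and output index 1, only the final byte of the v1 ID changes (0x02 ^ 0x01 = 0x03), matching the test vector in this file:

    use lightning::ln::ChannelId;

    fn channel_id_examples() {
        // v1: funding txid with the big-endian output index XORed into the last two bytes.
        let v1 = ChannelId::v1_from_funding_txid(&[2; 32], 1);
        assert_eq!(v1.to_string(),
            "0202020202020202020202020202020202020202020202020202020202020203");

        // All-zero placeholder, e.g. before a real ID is known.
        let placeholder = ChannelId::new_zero();
        assert!(placeholder.is_zero());

        // Generic constructor from raw bytes; distinct from the v1 ID above.
        let raw = ChannelId::from_bytes([2; 32]);
        assert_ne!(raw, v1);
    }
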
index cf280fabc3188d501e2fc45c79e991dfc4aa1940..213a2882fbc3f899e5dd43ebc09a92a47f35c39a 100644 (file)
@@ -39,7 +39,7 @@ use crate::events;
 use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination, PaymentFailureReason};
 // Since this struct is returned in `list_channels` methods, expose it here in case users want to
 // construct one themselves.
-use crate::ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret};
+use crate::ln::{inbound_payment, ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
 use crate::ln::channel::{Channel, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel};
 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
 #[cfg(any(feature = "_test_utils", test))]
@@ -418,13 +418,13 @@ impl Into<u16> for FailureCode {
 
 struct MsgHandleErrInternal {
        err: msgs::LightningError,
-       chan_id: Option<([u8; 32], u128)>, // If Some a channel of ours has been closed
+       chan_id: Option<(ChannelId, u128)>, // If Some a channel of ours has been closed
        shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
        channel_capacity: Option<u64>,
 }
 impl MsgHandleErrInternal {
        #[inline]
-       fn send_err_msg_no_close(err: String, channel_id: [u8; 32]) -> Self {
+       fn send_err_msg_no_close(err: String, channel_id: ChannelId) -> Self {
                Self {
                        err: LightningError {
                                err: err.clone(),
@@ -445,7 +445,7 @@ impl MsgHandleErrInternal {
                Self { err, chan_id: None, shutdown_finish: None, channel_capacity: None }
        }
        #[inline]
-       fn from_finish_shutdown(err: String, channel_id: [u8; 32], user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>, channel_capacity: u64) -> Self {
+       fn from_finish_shutdown(err: String, channel_id: ChannelId, user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>, channel_capacity: u64) -> Self {
                Self {
                        err: LightningError {
                                err: err.clone(),
@@ -462,7 +462,7 @@ impl MsgHandleErrInternal {
                }
        }
        #[inline]
-       fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self {
+       fn from_chan_no_close(err: ChannelError, channel_id: ChannelId) -> Self {
                Self {
                        err: match err {
                                ChannelError::Warn(msg) =>  LightningError {
@@ -587,7 +587,7 @@ enum BackgroundEvent {
        /// on a channel.
        MonitorUpdatesComplete {
                counterparty_node_id: PublicKey,
-               channel_id: [u8; 32],
+               channel_id: ChannelId,
        },
 }
 
@@ -648,7 +648,7 @@ pub(crate) enum RAAMonitorUpdateBlockingAction {
        /// durably to disk.
        ForwardedPaymentInboundClaim {
                /// The upstream channel ID (i.e. the inbound edge).
-               channel_id: [u8; 32],
+               channel_id: ChannelId,
                /// The HTLC ID on the inbound edge.
                htlc_id: u64,
        },
@@ -674,26 +674,26 @@ pub(super) struct PeerState<SP: Deref> where SP::Target: SignerProvider {
        /// `channel_id` -> `Channel`.
        ///
        /// Holds all funded channels where the peer is the counterparty.
-       pub(super) channel_by_id: HashMap<[u8; 32], Channel<SP>>,
+       pub(super) channel_by_id: HashMap<ChannelId, Channel<SP>>,
        /// `temporary_channel_id` -> `OutboundV1Channel`.
        ///
        /// Holds all outbound V1 channels where the peer is the counterparty. Once an outbound channel has
        /// been assigned a `channel_id`, the entry in this map is removed and one is created in
        /// `channel_by_id`.
-       pub(super) outbound_v1_channel_by_id: HashMap<[u8; 32], OutboundV1Channel<SP>>,
+       pub(super) outbound_v1_channel_by_id: HashMap<ChannelId, OutboundV1Channel<SP>>,
        /// `temporary_channel_id` -> `InboundV1Channel`.
        ///
        /// Holds all inbound V1 channels where the peer is the counterparty. Once an inbound channel has
        /// been assigned a `channel_id`, the entry in this map is removed and one is created in
        /// `channel_by_id`.
-       pub(super) inbound_v1_channel_by_id: HashMap<[u8; 32], InboundV1Channel<SP>>,
+       pub(super) inbound_v1_channel_by_id: HashMap<ChannelId, InboundV1Channel<SP>>,
        /// `temporary_channel_id` -> `InboundChannelRequest`.
        ///
        /// When manual channel acceptance is enabled, this holds all unaccepted inbound channels where
        /// the peer is the counterparty. If the channel is accepted, then the entry in this table is
        /// removed, and an InboundV1Channel is created and placed in the `inbound_v1_channel_by_id` table. If
        /// the channel is rejected, then the entry is simply removed.
-       pub(super) inbound_channel_request_by_id: HashMap<[u8; 32], InboundChannelRequest>,
+       pub(super) inbound_channel_request_by_id: HashMap<ChannelId, InboundChannelRequest>,
        /// The latest `InitFeatures` we heard from the peer.
        latest_features: InitFeatures,
        /// Messages to send to the peer - pushed to in the same lock that they are generated in (except
@@ -720,12 +720,12 @@ pub(super) struct PeerState<SP: Deref> where SP::Target: SignerProvider {
        /// same `temporary_channel_id` (or final `channel_id` in the case of 0conf channels or prior
        /// to funding appearing on-chain), the downstream `ChannelMonitor` set is required to ensure
        /// duplicates do not occur, so such channels should fail without a monitor update completing.
-       monitor_update_blocked_actions: BTreeMap<[u8; 32], Vec<MonitorUpdateCompletionAction>>,
+       monitor_update_blocked_actions: BTreeMap<ChannelId, Vec<MonitorUpdateCompletionAction>>,
        /// If another channel's [`ChannelMonitorUpdate`] needs to complete before a channel we have
        /// with this peer can complete an RAA [`ChannelMonitorUpdate`] (e.g. because the RAA update
        /// will remove a preimage that needs to be durably in an upstream channel first), we put an
        /// entry here to note that the channel with the key's ID is blocked on a set of actions.
-       actions_blocking_raa_monitor_updates: BTreeMap<[u8; 32], Vec<RAAMonitorUpdateBlockingAction>>,
+       actions_blocking_raa_monitor_updates: BTreeMap<ChannelId, Vec<RAAMonitorUpdateBlockingAction>>,
        /// The peer is currently connected (i.e. we've seen a
        /// [`ChannelMessageHandler::peer_connected`] and no corresponding
        /// [`ChannelMessageHandler::peer_disconnected`].
@@ -753,11 +753,11 @@ impl <SP: Deref> PeerState<SP> where SP::Target: SignerProvider {
        }
 
        // Returns a bool indicating if the given `channel_id` matches a channel we have with this peer.
-       fn has_channel(&self, channel_id: &[u8; 32]) -> bool {
-               self.channel_by_id.contains_key(channel_id) ||
-                       self.outbound_v1_channel_by_id.contains_key(channel_id) ||
-                       self.inbound_v1_channel_by_id.contains_key(channel_id) ||
-                       self.inbound_channel_request_by_id.contains_key(channel_id)
+       fn has_channel(&self, channel_id: &ChannelId) -> bool {
+               self.channel_by_id.contains_key(&channel_id) ||
+                       self.outbound_v1_channel_by_id.contains_key(&channel_id) ||
+                       self.inbound_v1_channel_by_id.contains_key(&channel_id) ||
+                       self.inbound_channel_request_by_id.contains_key(&channel_id)
        }
 }
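
Because `ChannelId` derives `Hash`, `Eq`, and `Ord` (see channel_id.rs above), it can key the `HashMap`s and `BTreeMap`s in `PeerState` directly, as the hunks above show. A tiny self-contained sketch of that property (import path again assumed):

    use std::collections::HashMap;
    use lightning::ln::ChannelId;

    fn map_keyed_by_channel_id() {
        let mut note_by_channel: HashMap<ChannelId, &'static str> = HashMap::new();
        let id = ChannelId::v1_from_funding_txid(&[2; 32], 0);
        note_by_channel.insert(id, "funded");
        // Lookups take a borrowed key, as in `has_channel` above.
        assert!(note_by_channel.contains_key(&id));
    }
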
 
@@ -1104,7 +1104,7 @@ where
        /// required to access the channel with the `counterparty_node_id`.
        ///
        /// See `ChannelManager` struct-level documentation for lock order requirements.
-       id_to_peer: Mutex<HashMap<[u8; 32], PublicKey>>,
+       id_to_peer: Mutex<HashMap<ChannelId, PublicKey>>,
 
        /// SCIDs (and outbound SCID aliases) -> `counterparty_node_id`s and `channel_id`s.
        ///
@@ -1118,9 +1118,9 @@ where
        ///
        /// See `ChannelManager` struct-level documentation for lock order requirements.
        #[cfg(test)]
-       pub(super) short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, [u8; 32])>>,
+       pub(super) short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, ChannelId)>>,
        #[cfg(not(test))]
-       short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, [u8; 32])>>,
+       short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, ChannelId)>>,
 
        our_network_pubkey: PublicKey,
 
@@ -1422,7 +1422,7 @@ pub struct ChannelDetails {
        /// thereafter this is the txid of the funding transaction xor the funding transaction output).
        /// Note that this means this value is *not* persistent - it can change once during the
        /// lifetime of the channel.
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// Parameters which apply to our counterparty. See individual fields for more information.
        pub counterparty: ChannelCounterparty,
        /// The Channel's funding transaction output, if we've negotiated the funding transaction with
@@ -1821,7 +1821,7 @@ macro_rules! convert_chan_err {
                                (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone()))
                        },
                        ChannelError::Close(msg) => {
-                               log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg);
+                               log_error!($self.logger, "Closing channel {} due to close-required error: {}", &$channel_id, msg);
                                update_maps_on_chan_removal!($self, &$channel.context);
                                let shutdown_res = $channel.context.force_shutdown(true);
                                (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.context.get_user_id(),
@@ -1834,7 +1834,7 @@ macro_rules! convert_chan_err {
                        // We should only ever have `ChannelError::Close` when unfunded channels error.
                        // In any case, just close the channel.
                        ChannelError::Warn(msg) | ChannelError::Ignore(msg) | ChannelError::Close(msg) => {
-                               log_error!($self.logger, "Closing unfunded channel {} due to an error: {}", log_bytes!($channel_id[..]), msg);
+                               log_error!($self.logger, "Closing unfunded channel {} due to an error: {}", &$channel_id, msg);
                                update_maps_on_chan_removal!($self, &$channel_context);
                                let shutdown_res = $channel_context.force_shutdown(false);
                                (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel_context.get_user_id(),
@@ -2007,12 +2007,12 @@ macro_rules! handle_new_monitor_update {
                match $update_res {
                        ChannelMonitorUpdateStatus::InProgress => {
                                log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
-                                       log_bytes!($chan.context.channel_id()[..]));
+                                       &$chan.context.channel_id());
                                Ok(false)
                        },
                        ChannelMonitorUpdateStatus::PermanentFailure => {
                                log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure",
-                                       log_bytes!($chan.context.channel_id()[..]));
+                                       &$chan.context.channel_id());
                                update_maps_on_chan_removal!($self, &$chan.context);
                                let res = Err(MsgHandleErrInternal::from_finish_shutdown(
                                        "ChannelMonitor storage failure".to_owned(), $chan.context.channel_id(),
@@ -2261,7 +2261,7 @@ where
        /// [`Event::FundingGenerationReady::user_channel_id`]: events::Event::FundingGenerationReady::user_channel_id
        /// [`Event::FundingGenerationReady::temporary_channel_id`]: events::Event::FundingGenerationReady::temporary_channel_id
        /// [`Event::ChannelClosed::channel_id`]: events::Event::ChannelClosed::channel_id
-       pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_channel_id: u128, override_config: Option<UserConfig>) -> Result<[u8; 32], APIError> {
+       pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_channel_id: u128, override_config: Option<UserConfig>) -> Result<ChannelId, APIError> {
                if channel_value_satoshis < 1000 {
                        return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) });
                }
@@ -2312,7 +2312,7 @@ where
                Ok(temporary_channel_id)
        }
 
-       fn list_funded_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<SP>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
+       fn list_funded_channels_with_filter<Fn: FnMut(&(&ChannelId, &Channel<SP>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
                // Allocate our best estimate of the number of channels we have in the `res`
                // Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
                // a scid or a scid alias, and the `id_to_peer` shouldn't be used outside
@@ -2457,7 +2457,7 @@ where
                }, None));
        }
 
-       fn close_channel_internal(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, override_shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
+       fn close_channel_internal(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, override_shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
                let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
@@ -2549,7 +2549,7 @@ where
        /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
        /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
        /// [`SendShutdown`]: crate::events::MessageSendEvent::SendShutdown
-       pub fn close_channel(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey) -> Result<(), APIError> {
+       pub fn close_channel(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey) -> Result<(), APIError> {
                self.close_channel_internal(channel_id, counterparty_node_id, None, None)
        }
 
@@ -2583,7 +2583,7 @@ where
        /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
        /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
        /// [`SendShutdown`]: crate::events::MessageSendEvent::SendShutdown
-       pub fn close_channel_with_feerate_and_script(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
+       pub fn close_channel_with_feerate_and_script(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
                self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script)
        }
 
@@ -2608,7 +2608,7 @@ where
 
        /// `peer_msg` should be set when we receive a message from a peer, but not set when the
        /// user closes, which will be re-exposed as the `ChannelClosed` reason.
-       fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: &PublicKey, peer_msg: Option<&String>, broadcast: bool)
+       fn force_close_channel_with_peer(&self, channel_id: &ChannelId, peer_node_id: &PublicKey, peer_msg: Option<&String>, broadcast: bool)
        -> Result<PublicKey, APIError> {
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(peer_node_id)
@@ -2622,33 +2622,33 @@ where
                                ClosureReason::HolderForceClosed
                        };
                        if let hash_map::Entry::Occupied(chan) = peer_state.channel_by_id.entry(channel_id.clone()) {
-                               log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
+                               log_error!(self.logger, "Force-closing channel {}", &channel_id);
                                self.issue_channel_close_events(&chan.get().context, closure_reason);
                                let mut chan = remove_channel!(self, chan);
                                self.finish_force_close_channel(chan.context.force_shutdown(broadcast));
                                (self.get_channel_update_for_broadcast(&chan).ok(), chan.context.get_counterparty_node_id())
                        } else if let hash_map::Entry::Occupied(chan) = peer_state.outbound_v1_channel_by_id.entry(channel_id.clone()) {
-                               log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
+                               log_error!(self.logger, "Force-closing channel {}", &channel_id);
                                self.issue_channel_close_events(&chan.get().context, closure_reason);
                                let mut chan = remove_channel!(self, chan);
                                self.finish_force_close_channel(chan.context.force_shutdown(false));
                                // Unfunded channel has no update
                                (None, chan.context.get_counterparty_node_id())
                        } else if let hash_map::Entry::Occupied(chan) = peer_state.inbound_v1_channel_by_id.entry(channel_id.clone()) {
-                               log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
+                               log_error!(self.logger, "Force-closing channel {}", &channel_id);
                                self.issue_channel_close_events(&chan.get().context, closure_reason);
                                let mut chan = remove_channel!(self, chan);
                                self.finish_force_close_channel(chan.context.force_shutdown(false));
                                // Unfunded channel has no update
                                (None, chan.context.get_counterparty_node_id())
                        } else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() {
-                               log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
+                               log_error!(self.logger, "Force-closing channel {}", &channel_id);
                                // N.B. that we don't send any channel close event here: we
                                // don't have a user_channel_id, and we never sent any opening
                                // events anyway.
                                (None, *peer_node_id)
                        } else {
-                               return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), peer_node_id) });
+                               return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", channel_id, peer_node_id) });
                        }
                };
                if let Some(update) = update_opt {
@@ -2661,7 +2661,7 @@ where
                Ok(counterparty_node_id)
        }
 
-       fn force_close_sending_error(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> {
+       fn force_close_sending_error(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None, broadcast) {
                        Ok(counterparty_node_id) => {
@@ -2687,7 +2687,7 @@ where
        /// rejecting new HTLCs on the given channel. Fails if `channel_id` is unknown to
        /// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding
        /// channel.
-       pub fn force_close_broadcasting_latest_txn(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey)
+       pub fn force_close_broadcasting_latest_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey)
        -> Result<(), APIError> {
                self.force_close_sending_error(channel_id, counterparty_node_id, true)
        }
@@ -2698,7 +2698,7 @@ where
        ///
        /// You can always get the latest local transaction(s) to broadcast from
        /// [`ChannelMonitor::get_latest_holder_commitment_txn`].
-       pub fn force_close_without_broadcasting_txn(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey)
+       pub fn force_close_without_broadcasting_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey)
        -> Result<(), APIError> {
                self.force_close_sending_error(channel_id, counterparty_node_id, false)
        }
@@ -3138,7 +3138,7 @@ where
                if chan.context.get_short_channel_id().is_none() {
                        return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError});
                }
-               log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.context.channel_id()));
+               log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", &chan.context.channel_id());
                self.get_channel_update_for_unicast(chan)
        }
 
@@ -3154,7 +3154,7 @@ where
        /// [`channel_update`]: msgs::ChannelUpdate
        /// [`internal_closing_signed`]: Self::internal_closing_signed
        fn get_channel_update_for_unicast(&self, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
-               log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.context.channel_id()));
+               log_trace!(self.logger, "Attempting to generate channel update for channel {}", &chan.context.channel_id());
                let short_channel_id = match chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) {
                        None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
                        Some(id) => id,
@@ -3164,7 +3164,7 @@ where
        }
 
        fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
-               log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.context.channel_id()));
+               log_trace!(self.logger, "Generating channel update for channel {}", &chan.context.channel_id());
                let were_node_one = self.our_network_pubkey.serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
 
                let enabled = chan.context.is_usable() && match chan.channel_update_status() {
@@ -3215,7 +3215,9 @@ where
                // The top-level caller should hold the total_consistency_lock read lock.
                debug_assert!(self.total_consistency_lock.try_write().is_err());
 
-               log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.hops.first().unwrap().short_channel_id);
+               log_trace!(self.logger,
+                       "Attempting to send payment with payment hash {} along path with next hop {}",
+                       payment_hash, path.hops.first().unwrap().short_channel_id);
                let prng_seed = self.entropy_source.get_secure_random_bytes();
                let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted");
 
@@ -3458,7 +3460,7 @@ where
        /// Handles the generation of a funding transaction, optionally (for tests) with a function
        /// which checks the correctness of the funding transaction given the associated channel.
        fn funding_transaction_generated_intern<FundingOutput: Fn(&OutboundV1Channel<SP>, &Transaction) -> Result<OutPoint, APIError>>(
-               &self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput
+               &self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput
        ) -> Result<(), APIError> {
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
@@ -3466,7 +3468,7 @@ where
 
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
-               let (chan, msg) = match peer_state.outbound_v1_channel_by_id.remove(temporary_channel_id) {
+               let (chan, msg) = match peer_state.outbound_v1_channel_by_id.remove(&temporary_channel_id) {
                        Some(chan) => {
                                let funding_txo = find_funding_output(&chan, &funding_transaction)?;
 
@@ -3495,7 +3497,7 @@ where
                                return Err(APIError::ChannelUnavailable {
                                        err: format!(
                                                "Channel with id {} not found for the passed counterparty node_id {}",
-                                               log_bytes!(*temporary_channel_id), counterparty_node_id),
+                                               temporary_channel_id, counterparty_node_id),
                                })
                        },
                };
@@ -3520,7 +3522,7 @@ where
        }
 
        #[cfg(test)]
-       pub(crate) fn funding_transaction_generated_unchecked(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction, output_index: u16) -> Result<(), APIError> {
+       pub(crate) fn funding_transaction_generated_unchecked(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction, output_index: u16) -> Result<(), APIError> {
                self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, |_, tx| {
                        Ok(OutPoint { txid: tx.txid(), index: output_index })
                })
@@ -3556,7 +3558,7 @@ where
        ///
        /// [`Event::FundingGenerationReady`]: crate::events::Event::FundingGenerationReady
        /// [`Event::ChannelClosed`]: crate::events::Event::ChannelClosed
-       pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction) -> Result<(), APIError> {
+       pub fn funding_transaction_generated(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
                for inp in funding_transaction.input.iter() {
@@ -3629,7 +3631,7 @@ where
        /// [`ChannelUnavailable`]: APIError::ChannelUnavailable
        /// [`APIMisuseError`]: APIError::APIMisuseError
        pub fn update_partial_channel_config(
-               &self, counterparty_node_id: &PublicKey, channel_ids: &[[u8; 32]], config_update: &ChannelConfigUpdate,
+               &self, counterparty_node_id: &PublicKey, channel_ids: &[ChannelId], config_update: &ChannelConfigUpdate,
        ) -> Result<(), APIError> {
                if config_update.cltv_expiry_delta.map(|delta| delta < MIN_CLTV_EXPIRY_DELTA).unwrap_or(false) {
                        return Err(APIError::APIMisuseError {
@@ -3646,7 +3648,7 @@ where
                for channel_id in channel_ids {
                        if !peer_state.has_channel(channel_id) {
                                return Err(APIError::ChannelUnavailable {
-                                       err: format!("Channel with ID {} was not found for the passed counterparty_node_id {}", log_bytes!(*channel_id), counterparty_node_id),
+                                       err: format!("Channel with ID {} was not found for the passed counterparty_node_id {}", channel_id, counterparty_node_id),
                                });
                        };
                }
@@ -3678,7 +3680,7 @@ where
                                return Err(APIError::ChannelUnavailable {
                                        err: format!(
                                                "Channel with ID {} for passed counterparty_node_id {} disappeared after we confirmed its existence - this should not be reachable!",
-                                               log_bytes!(*channel_id), counterparty_node_id),
+                                               channel_id, counterparty_node_id),
                                });
                        };
                        let mut config = context.config();
@@ -3713,7 +3715,7 @@ where
        /// [`ChannelUnavailable`]: APIError::ChannelUnavailable
        /// [`APIMisuseError`]: APIError::APIMisuseError
        pub fn update_channel_config(
-               &self, counterparty_node_id: &PublicKey, channel_ids: &[[u8; 32]], config: &ChannelConfig,
+               &self, counterparty_node_id: &PublicKey, channel_ids: &[ChannelId], config: &ChannelConfig,
        ) -> Result<(), APIError> {
                return self.update_partial_channel_config(counterparty_node_id, channel_ids, &(*config).into());
        }
@@ -3743,7 +3745,7 @@ where
        /// [`HTLCIntercepted::expected_outbound_amount_msat`]: events::Event::HTLCIntercepted::expected_outbound_amount_msat
        // TODO: when we move to deciding the best outbound channel at forward time, only take
        // `next_node_id` and not `next_hop_channel_id`
-       pub fn forward_intercepted_htlc(&self, intercept_id: InterceptId, next_hop_channel_id: &[u8; 32], next_node_id: PublicKey, amt_to_forward_msat: u64) -> Result<(), APIError> {
+       pub fn forward_intercepted_htlc(&self, intercept_id: InterceptId, next_hop_channel_id: &ChannelId, next_node_id: PublicKey, amt_to_forward_msat: u64) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
                let next_hop_scid = {
@@ -3752,18 +3754,18 @@ where
                                .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", next_node_id) })?;
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
-                       match peer_state.channel_by_id.get(next_hop_channel_id) {
+                       match peer_state.channel_by_id.get(&next_hop_channel_id) {
                                Some(chan) => {
                                        if !chan.context.is_usable() {
                                                return Err(APIError::ChannelUnavailable {
-                                                       err: format!("Channel with id {} not fully established", log_bytes!(*next_hop_channel_id))
+                                                       err: format!("Channel with id {} not fully established", next_hop_channel_id)
                                                })
                                        }
                                        chan.context.get_short_channel_id().unwrap_or(chan.context.outbound_scid_alias())
                                },
                                None => return Err(APIError::ChannelUnavailable {
                                        err: format!("Funded channel with id {} not found for the passed counterparty node_id {}. Channel may still be opening.",
-                                               log_bytes!(*next_hop_channel_id), next_node_id)
+                                               next_hop_channel_id, next_node_id)
                                })
                        }
                };
@@ -4375,21 +4377,21 @@ where
                let _ = self.process_background_events();
        }
 
-       fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<SP>, new_feerate: u32) -> NotifyOption {
+       fn update_channel_fee(&self, chan_id: &ChannelId, chan: &mut Channel<SP>, new_feerate: u32) -> NotifyOption {
                if !chan.context.is_outbound() { return NotifyOption::SkipPersist; }
                // If the feerate has decreased by less than half, don't bother
                if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() {
                        log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.",
-                               log_bytes!(chan_id[..]), chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
+                               &chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
                        return NotifyOption::SkipPersist;
                }
                if !chan.context.is_live() {
                        log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
-                               log_bytes!(chan_id[..]), chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
+                               &chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
                        return NotifyOption::SkipPersist;
                }
                log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.",
-                       log_bytes!(chan_id[..]), chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
+                       &chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
 
                chan.queue_update_fee(new_feerate, &self.fee_estimator, &self.logger);
                NotifyOption::DoPersist
@@ -4517,7 +4519,7 @@ where
 
                                                if chan.should_disconnect_peer_awaiting_response() {
                                                        log_debug!(self.logger, "Disconnecting peer {} due to not making any progress on channel {}",
-                                                                       counterparty_node_id, log_bytes!(*chan_id));
+                                                                       counterparty_node_id, chan_id);
                                                        pending_msg_events.push(MessageSendEvent::HandleError {
                                                                node_id: counterparty_node_id,
                                                                action: msgs::ErrorAction::DisconnectPeerWithWarning {
@@ -4533,7 +4535,7 @@ where
                                        });
 
                                        let process_unfunded_channel_tick = |
-                                               chan_id: &[u8; 32],
+                                               chan_id: &ChannelId,
                                                chan_context: &mut ChannelContext<SP>,
                                                unfunded_chan_context: &mut UnfundedChannelContext,
                                                pending_msg_events: &mut Vec<MessageSendEvent>,
@@ -4542,7 +4544,7 @@ where
                                                if unfunded_chan_context.should_expire_unfunded_channel() {
                                                        log_error!(self.logger,
                                                                "Force-closing pending channel with ID {} for not establishing in a timely manner",
-                                                               log_bytes!(&chan_id[..]));
+                                                               &chan_id);
                                                        update_maps_on_chan_removal!(self, &chan_context);
                                                        self.issue_channel_close_events(&chan_context, ClosureReason::HolderForceClosed);
                                                        self.finish_force_close_channel(chan_context.force_shutdown(false));
@@ -4567,7 +4569,7 @@ where
 
                                        for (chan_id, req) in peer_state.inbound_channel_request_by_id.iter_mut() {
                                                if { req.ticks_remaining -= 1 ; req.ticks_remaining } <= 0 {
-                                                       log_error!(self.logger, "Force-closing unaccepted inbound channel {} for not accepting in a timely manner", log_bytes!(&chan_id[..]));
+                                                       log_error!(self.logger, "Force-closing unaccepted inbound channel {} for not accepting in a timely manner", &chan_id);
                                                        peer_state.pending_msg_events.push(
                                                                events::MessageSendEvent::HandleError {
                                                                        node_id: counterparty_node_id,
@@ -4770,7 +4772,7 @@ where
        // failed backwards or, if they were one of our outgoing HTLCs, then their failure needs to
        // be surfaced to the user.
        fn fail_holding_cell_htlcs(
-               &self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: [u8; 32],
+               &self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: ChannelId,
                counterparty_node_id: &PublicKey
        ) {
                let (failure_code, onion_failure_data) = {
@@ -5046,7 +5048,7 @@ where
                                        if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res {
                                                if let Some(action) = completion_action(Some(htlc_value_msat)) {
                                                        log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}",
-                                                               log_bytes!(chan_id), action);
+                                                               &chan_id, action);
                                                        peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
                                                }
                                                if !during_init {
@@ -5213,7 +5215,7 @@ where
                channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
        -> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> {
                log_trace!(self.logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
-                       log_bytes!(channel.context.channel_id()),
+                       &channel.context.channel_id(),
                        if raa.is_some() { "an" } else { "no" },
                        if commitment_update.is_some() { "a" } else { "no" }, pending_forwards.len(),
                        if funding_broadcastable.is_some() { "" } else { "not " },
@@ -5341,7 +5343,7 @@ where
        ///
        /// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest
        /// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id
-       pub fn accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, user_channel_id: u128) -> Result<(), APIError> {
+       pub fn accept_inbound_channel(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, user_channel_id: u128) -> Result<(), APIError> {
                self.do_accept_inbound_channel(temporary_channel_id, counterparty_node_id, false, user_channel_id)
        }
 
@@ -5363,11 +5365,11 @@ where
        ///
        /// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest
        /// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id
-       pub fn accept_inbound_channel_from_trusted_peer_0conf(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, user_channel_id: u128) -> Result<(), APIError> {
+       pub fn accept_inbound_channel_from_trusted_peer_0conf(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, user_channel_id: u128) -> Result<(), APIError> {
                self.do_accept_inbound_channel(temporary_channel_id, counterparty_node_id, true, user_channel_id)
        }
 
-       fn do_accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> {
+       fn do_accept_inbound_channel(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
                let peers_without_funded_channels =
@@ -5733,7 +5735,7 @@ where
                                let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, &self.node_signer,
                                        self.genesis_hash.clone(), &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan);
                                if let Some(announcement_sigs) = announcement_sigs_opt {
-                                       log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().context.channel_id()));
+                                       log_trace!(self.logger, "Sending announcement_signatures for channel {}", &chan.get().context.channel_id());
                                        peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
                                                node_id: counterparty_node_id.clone(),
                                                msg: announcement_sigs,
@@ -5744,7 +5746,7 @@ where
                                        // counterparty's announcement_signatures. Thus, we only bother to send a
                                        // channel_update here if the channel is not public, i.e. we're not sending an
                                        // announcement_signatures.
-                                       log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().context.channel_id()));
+                                       log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", &chan.get().context.channel_id());
                                        if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
                                                peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
                                                        node_id: counterparty_node_id.clone(),
@@ -5778,13 +5780,13 @@ where
                        // TODO(dunxen): Fix this duplication when we switch to a single map with enums as per
                        // https://github.com/lightningdevkit/rust-lightning/issues/2422
                        if let hash_map::Entry::Occupied(chan_entry) = peer_state.outbound_v1_channel_by_id.entry(msg.channel_id.clone()) {
-                               log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", log_bytes!(&msg.channel_id[..]));
+                               log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
                                self.issue_channel_close_events(&chan_entry.get().context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
                                let mut chan = remove_channel!(self, chan_entry);
                                self.finish_force_close_channel(chan.context.force_shutdown(false));
                                return Ok(());
                        } else if let hash_map::Entry::Occupied(chan_entry) = peer_state.inbound_v1_channel_by_id.entry(msg.channel_id.clone()) {
-                               log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", log_bytes!(&msg.channel_id[..]));
+                               log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
                                self.issue_channel_close_events(&chan_entry.get().context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
                                let mut chan = remove_channel!(self, chan_entry);
                                self.finish_force_close_channel(chan.context.force_shutdown(false));
@@ -5792,7 +5794,7 @@ where
                        } else if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(msg.channel_id.clone()) {
                                if !chan_entry.get().received_shutdown() {
                                        log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
-                                               log_bytes!(msg.channel_id),
+                                               &msg.channel_id,
                                                if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
                                }
 
@@ -6129,7 +6131,7 @@ where
        /// completes. Note that this needs to happen in the same [`PeerState`] mutex as any release of
        /// the [`ChannelMonitorUpdate`] in question.
        fn raa_monitor_updates_held(&self,
-               actions_blocking_raa_monitor_updates: &BTreeMap<[u8; 32], Vec<RAAMonitorUpdateBlockingAction>>,
+               actions_blocking_raa_monitor_updates: &BTreeMap<ChannelId, Vec<RAAMonitorUpdateBlockingAction>>,
                channel_funding_outpoint: OutPoint, counterparty_node_id: PublicKey
        ) -> bool {
                actions_blocking_raa_monitor_updates
@@ -6256,7 +6258,7 @@ where
                                if were_node_one == msg_from_node_one {
                                        return Ok(NotifyOption::SkipPersist);
                                } else {
-                                       log_debug!(self.logger, "Received channel_update for channel {}.", log_bytes!(chan_id));
+                                       log_debug!(self.logger, "Received channel_update for channel {}.", &chan_id);
                                        try_chan_entry!(self, chan.get_mut().channel_update(&msg), chan);
                                }
                        },
@@ -6440,7 +6442,7 @@ where
                                                if let Some(monitor_update) = monitor_opt {
                                                        has_monitor_update = true;
 
-                                                       let channel_id: [u8; 32] = *channel_id;
+                                                       let channel_id: ChannelId = *channel_id;
                                                        let res = handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
                                                                peer_state_lock, peer_state, per_peer_state, chan, MANUALLY_REMOVING,
                                                                peer_state.channel_by_id.remove(&channel_id));
@@ -6774,7 +6776,7 @@ where
                                        // blocking monitor updates for this channel. If we do, release the monitor
                                        // update(s) when those blockers complete.
                                        log_trace!(self.logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first",
-                                               log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
+                                               &channel_funding_outpoint.to_channel_id());
                                        break;
                                }
 
@@ -6782,7 +6784,7 @@ where
                                        debug_assert_eq!(chan.get().context.get_funding_txo().unwrap(), channel_funding_outpoint);
                                        if let Some((monitor_update, further_update_exists)) = chan.get_mut().unblock_next_blocked_monitor_update() {
                                                log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor",
-                                                       log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
+                                                       &channel_funding_outpoint.to_channel_id());
                                                if let Err(e) = handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
                                                        peer_state_lck, peer_state, per_peer_state, chan)
                                                {
@@ -6795,7 +6797,7 @@ where
                                                }
                                        } else {
                                                log_trace!(self.logger, "Unlocked monitor updating for channel {} without monitors to update",
-                                                       log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
+                                                       &channel_funding_outpoint.to_channel_id());
                                        }
                                }
                        } else {
@@ -7093,7 +7095,7 @@ where
                                                if let Some(channel_ready) = channel_ready_opt {
                                                        send_channel_ready!(self, pending_msg_events, channel, channel_ready);
                                                        if channel.context.is_usable() {
-                                                               log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.context.channel_id()));
+                                                               log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", &channel.context.channel_id());
                                                                if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
                                                                        pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
                                                                                node_id: channel.context.get_counterparty_node_id(),
@@ -7101,7 +7103,7 @@ where
                                                                        });
                                                                }
                                                        } else {
-                                                               log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.context.channel_id()));
+                                                               log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", &channel.context.channel_id());
                                                        }
                                                }
 
@@ -7111,7 +7113,7 @@ where
                                                }
 
                                                if let Some(announcement_sigs) = announcement_sigs {
-                                                       log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.context.channel_id()));
+                                                       log_trace!(self.logger, "Sending announcement_signatures for channel {}", &channel.context.channel_id());
                                                        pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
                                                                node_id: channel.context.get_counterparty_node_id(),
                                                                msg: announcement_sigs,
@@ -7570,7 +7572,7 @@ where
                                // very low priority for the LND team despite being marked "P1".
                                // We're not going to bother handling this in a sensible way, instead simply
                                // repeating the Shutdown message on repeat until morale improves.
-                               if msg.channel_id != [0; 32] {
+                               if !msg.channel_id.is_zero() {
                                        let per_peer_state = self.per_peer_state.read().unwrap();
                                        let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
                                        if peer_state_mutex_opt.is_none() { return; }
@@ -7599,8 +7601,8 @@ where
                        _ => {}
                }
 
-               if msg.channel_id == [0; 32] {
-                       let channel_ids: Vec<[u8; 32]> = {
+               if msg.channel_id.is_zero() {
+                       let channel_ids: Vec<ChannelId> = {
                                let per_peer_state = self.per_peer_state.read().unwrap();
                                let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
                                if peer_state_mutex_opt.is_none() { return; }
@@ -8614,7 +8616,7 @@ where
 
                let channel_count: u64 = Readable::read(reader)?;
                let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
-               let mut peer_channels: HashMap<PublicKey, HashMap<[u8; 32], Channel<SP>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+               let mut peer_channels: HashMap<PublicKey, HashMap<ChannelId, Channel<SP>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
                let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
                let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
                let mut channel_closures = VecDeque::new();
@@ -8634,7 +8636,7 @@ where
                                        log_error!(args.logger, "A ChannelManager is stale compared to the current ChannelMonitor!");
                                        log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
                                        log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
-                                               log_bytes!(channel.context.channel_id()), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id());
+                                               &channel.context.channel_id(), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id());
                                        let (monitor_update, mut new_failed_htlcs) = channel.context.force_shutdown(true);
                                        if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
                                                close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
@@ -8664,13 +8666,13 @@ where
                                                        // backwards leg of the HTLC will simply be rejected.
                                                        log_info!(args.logger,
                                                                "Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager",
-                                                               log_bytes!(channel.context.channel_id()), &payment_hash);
+                                                               &channel.context.channel_id(), &payment_hash);
                                                        failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.context.get_counterparty_node_id(), channel.context.channel_id()));
                                                }
                                        }
                                } else {
                                        log_info!(args.logger, "Successfully loaded channel {} at update_id {} against monitor at update id {}",
-                                               log_bytes!(channel.context.channel_id()), channel.context.get_latest_monitor_update_id(),
+                                               &channel.context.channel_id(), channel.context.get_latest_monitor_update_id(),
                                                monitor.get_latest_update_id());
                                        if let Some(short_channel_id) = channel.context.get_short_channel_id() {
                                                short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
@@ -8703,7 +8705,7 @@ where
                                        channel_capacity_sats: Some(channel.context.get_value_satoshis()),
                                }, None));
                        } else {
-                               log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.context.channel_id()));
+                               log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", &channel.context.channel_id());
                                log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
                                log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
                                log_error!(args.logger, " Without the ChannelMonitor we cannot continue without risking funds.");
@@ -8715,7 +8717,7 @@ where
                for (funding_txo, _) in args.channel_monitors.iter() {
                        if !funding_txo_set.contains(funding_txo) {
                                log_info!(args.logger, "Queueing monitor update to ensure missing channel {} is force closed",
-                                       log_bytes!(funding_txo.to_channel_id()));
+                                       &funding_txo.to_channel_id());
                                let monitor_update = ChannelMonitorUpdate {
                                        update_id: CLOSED_CHANNEL_UPDATE_ID,
                                        updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
@@ -8899,7 +8901,7 @@ where
                                $chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id());
                                for update in $chan_in_flight_upds.iter() {
                                        log_trace!(args.logger, "Replaying ChannelMonitorUpdate {} for {}channel {}",
-                                               update.update_id, $channel_info_log, log_bytes!($funding_txo.to_channel_id()));
+                                               update.update_id, $channel_info_log, &$funding_txo.to_channel_id());
                                        max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id);
                                        pending_background_events.push(
                                                BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
@@ -8947,7 +8949,7 @@ where
                                        // If the channel is ahead of the monitor, return InvalidValue:
                                        log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
                                        log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight",
-                                               log_bytes!(chan.context.channel_id()), monitor.get_latest_update_id(), max_in_flight_update_id);
+                                               &chan.context.channel_id(), monitor.get_latest_update_id(), max_in_flight_update_id);
                                        log_error!(args.logger, " but the ChannelManager is at update_id {}.", chan.get_latest_unblocked_monitor_update_id());
                                        log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
                                        log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
@@ -8973,7 +8975,7 @@ where
                                } else {
                                        log_error!(args.logger, "A ChannelMonitor is missing even though we have in-flight updates for it! This indicates a potentially-critical violation of the chain::Watch API!");
                                        log_error!(args.logger, " The ChannelMonitor for channel {} is missing.",
-                                               log_bytes!(funding_txo.to_channel_id()));
+                                               &funding_txo.to_channel_id());
                                        log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
                                        log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
                                        log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
@@ -9059,7 +9061,7 @@ where
                                                                                if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
                                                                                        if pending_forward_matches_htlc(&htlc_info) {
                                                                                                log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
-                                                                                                       &htlc.payment_hash, log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
+                                                                                                       &htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id());
                                                                                                false
                                                                                        } else { true }
                                                                                } else { true }
@@ -9069,7 +9071,7 @@ where
                                                                pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
                                                                        if pending_forward_matches_htlc(&htlc_info) {
                                                                                log_info!(args.logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
-                                                                                       &htlc.payment_hash, log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
+                                                                                       &htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id());
                                                                                pending_events_read.retain(|(event, _)| {
                                                                                        if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
                                                                                                intercepted_id != ev_id
@@ -9411,6 +9413,7 @@ mod tests {
        use core::sync::atomic::Ordering;
        use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
        use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
+       use crate::ln::ChannelId;
        use crate::ln::channelmanager::{inbound_payment, PaymentId, PaymentSendFailure, RecipientOnionFields, InterceptId};
        use crate::ln::functional_test_utils::*;
        use crate::ln::msgs::{self, ErrorAction};
@@ -9984,7 +9987,7 @@ mod tests {
                nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
 
                let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
-               let channel_id = &tx.txid().into_inner();
+               let channel_id = ChannelId::from_bytes(tx.txid().into_inner());
                {
                        // Ensure that the `id_to_peer` map is empty until either party has received the
                        // funding transaction, and has the real `channel_id`.
@@ -9998,7 +10001,7 @@ mod tests {
                        // as it has the funding transaction.
                        let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
                        assert_eq!(nodes_0_lock.len(), 1);
-                       assert!(nodes_0_lock.contains_key(channel_id));
+                       assert!(nodes_0_lock.contains_key(&channel_id));
                }
 
                assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
@@ -10009,7 +10012,7 @@ mod tests {
                {
                        let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
                        assert_eq!(nodes_0_lock.len(), 1);
-                       assert!(nodes_0_lock.contains_key(channel_id));
+                       assert!(nodes_0_lock.contains_key(&channel_id));
                }
                expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
 
@@ -10018,7 +10021,7 @@ mod tests {
                        // as it has the funding transaction.
                        let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
                        assert_eq!(nodes_1_lock.len(), 1);
-                       assert!(nodes_1_lock.contains_key(channel_id));
+                       assert!(nodes_1_lock.contains_key(&channel_id));
                }
                check_added_monitors!(nodes[1], 1);
                let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
@@ -10029,7 +10032,7 @@ mod tests {
                let (announcement, nodes_0_update, nodes_1_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
                update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &nodes_0_update, &nodes_1_update);
 
-               nodes[0].node.close_channel(channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+               nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
                nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()));
                let nodes_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
                nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &nodes_1_shutdown);
@@ -10043,7 +10046,7 @@ mod tests {
                        // party's signature for the fee negotiated closing transaction.)
                        let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
                        assert_eq!(nodes_0_lock.len(), 1);
-                       assert!(nodes_0_lock.contains_key(channel_id));
+                       assert!(nodes_0_lock.contains_key(&channel_id));
                }
 
                {
@@ -10053,7 +10056,7 @@ mod tests {
                        // kept in the `nodes[1]`'s `id_to_peer` map.
                        let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
                        assert_eq!(nodes_1_lock.len(), 1);
-                       assert!(nodes_1_lock.contains_key(channel_id));
+                       assert!(nodes_1_lock.contains_key(&channel_id));
                }
 
                nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()));
@@ -10069,7 +10072,7 @@ mod tests {
                        // doesn't have `nodes[0]`'s signature for the closing transaction yet.
                        let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
                        assert_eq!(nodes_1_lock.len(), 1);
-                       assert!(nodes_1_lock.contains_key(channel_id));
+                       assert!(nodes_1_lock.contains_key(&channel_id));
                }
 
                let (_nodes_0_update, closing_signed_node_0) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
@@ -10120,7 +10123,7 @@ mod tests {
                let nodes = create_network(2, &node_cfg, &node_chanmgr);
 
                // Dummy values
-               let channel_id = [4; 32];
+               let channel_id = ChannelId::from_bytes([4; 32]);
                let unkown_public_key = PublicKey::from_secret_key(&Secp256k1::signing_only(), &SecretKey::from_slice(&[42; 32]).unwrap());
                let intercept_id = InterceptId([0; 32]);
 
@@ -10175,11 +10178,11 @@ mod tests {
                                check_added_monitors!(nodes[0], 1);
                                expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
                        }
-                       open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+                       open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
                }
 
                // A MAX_UNFUNDED_CHANS_PER_PEER + 1 channel will be summarily rejected
-               open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+               open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
                nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
                assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
                        open_channel_msg.temporary_channel_id);
@@ -10230,7 +10233,7 @@ mod tests {
                for i in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
                        nodes[1].node.handle_open_channel(&peer_pks[i], &open_channel_msg);
                        get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, peer_pks[i]);
-                       open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+                       open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
                }
                nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
                assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
@@ -10270,7 +10273,7 @@ mod tests {
                for _ in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
                        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
                        get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
-                       open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+                       open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
                }
 
                // Once we have MAX_UNFUNDED_CHANS_PER_PEER unfunded channels, new inbound channels will be
@@ -10322,7 +10325,7 @@ mod tests {
                                _ => panic!("Unexpected event"),
                        }
                        get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, random_pk);
-                       open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
+                       open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
                }
 
                // If we try to accept a channel from another peer non-0conf it will fail.
@@ -10538,7 +10541,7 @@ mod tests {
 
                // If we provide a channel_id not associated with the peer, we should get an error and no updates
                // should be applied to ensure update atomicity as specified in the API docs.
-               let bad_channel_id = [10; 32];
+               let bad_channel_id = ChannelId::v1_from_funding_txid(&[10; 32], 10);
                let current_fee = nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths;
                let new_fee = current_fee + 100;
                assert!(
@@ -10585,7 +10588,7 @@ pub mod bench {
        use bitcoin::hashes::sha256::Hash as Sha256;
        use bitcoin::{Block, BlockHeader, PackedLockTime, Transaction, TxMerkleNode, TxOut};
 
-       use crate::sync::{Arc, Mutex};
+       use crate::sync::{Arc, Mutex, RwLock};
 
        use criterion::Criterion;
 
@@ -10622,7 +10625,7 @@ pub mod bench {
                let tx_broadcaster = test_utils::TestBroadcaster::new(network);
                let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
                let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
-               let scorer = Mutex::new(test_utils::TestScorer::new());
+               let scorer = RwLock::new(test_utils::TestScorer::new());
                let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &scorer);
 
                let mut config: UserConfig = Default::default();
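
As a side note for readers tracking the `[u8; 32]` to `ChannelId` migration in the hunks above, here is a minimal usage sketch, not part of this commit. It only uses constructors and methods that appear in the diff itself (`from_bytes`, `v1_from_funding_txid`, `is_zero`, and the `Display` impl); the `lightning::ln::ChannelId` import path is assumed from the `use crate::ln::ChannelId` lines and may differ in your tree.

use lightning::ln::ChannelId;

fn channel_id_sketch(funding_txid: [u8; 32]) {
	// Wrap raw bytes directly, as `ChannelId::from_bytes(tx.txid().into_inner())` does above.
	let id = ChannelId::from_bytes(funding_txid);

	// Derive a v1 channel id from a funding txid and output index, mirroring
	// `ChannelId::v1_from_funding_txid(&[10; 32], 10)` in the tests above.
	let v1_id = ChannelId::v1_from_funding_txid(&funding_txid, 0);

	// The all-zeros check that replaced comparisons against `[0; 32]`.
	if id.is_zero() {
		println!("all-zero channel id (used as a wildcard in the error handling above)");
	}

	// `ChannelId` implements `Display`, so log statements no longer need `log_bytes!`.
	println!("{} / {}", id, v1_id);
}
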
index 34568b07cad5e4d5dd5f13bb4404e17b6c8948d0..b8c2e085999c0dbdb978bbf2b85093580b464a37 100644 (file)
@@ -16,14 +16,14 @@ use crate::chain::channelmonitor::ChannelMonitor;
 use crate::chain::transaction::OutPoint;
 use crate::events::{ClaimedHTLC, ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, PaymentFailureReason};
 use crate::events::bump_transaction::{BumpTransactionEventHandler, Wallet, WalletSource};
-use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
+use crate::ln::{ChannelId, PaymentPreimage, PaymentHash, PaymentSecret};
 use crate::ln::channelmanager::{self, AChannelManager, ChainParameters, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, PaymentId, MIN_CLTV_EXPIRY_DELTA};
 use crate::routing::gossip::{P2PGossipSync, NetworkGraph, NetworkUpdate};
 use crate::routing::router::{self, PaymentParameters, Route};
 use crate::ln::features::InitFeatures;
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler,RoutingMessageHandler};
-use crate::util::enforcing_trait_impls::EnforcingSigner;
+use crate::util::test_channel_signer::TestChannelSigner;
 use crate::util::scid_utils;
 use crate::util::test_utils;
 use crate::util::test_utils::{panicking, TestChainMonitor, TestScorer, TestKeysInterface};
@@ -43,7 +43,7 @@ use crate::io;
 use crate::prelude::*;
 use core::cell::RefCell;
 use alloc::rc::Rc;
-use crate::sync::{Arc, Mutex, LockTestExt};
+use crate::sync::{Arc, Mutex, LockTestExt, RwLock};
 use core::mem;
 use core::iter::repeat;
 use bitcoin::{PackedLockTime, TxMerkleNode};
@@ -352,7 +352,7 @@ pub struct TestChanMonCfg {
        pub persister: test_utils::TestPersister,
        pub logger: test_utils::TestLogger,
        pub keys_manager: test_utils::TestKeysInterface,
-       pub scorer: Mutex<test_utils::TestScorer>,
+       pub scorer: RwLock<test_utils::TestScorer>,
 }
 
 pub struct NodeCfg<'a> {
@@ -520,7 +520,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
                                for outpoint in self.chain_monitor.chain_monitor.list_monitors() {
                                        let mut w = test_utils::TestVecWriter(Vec::new());
                                        self.chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut w).unwrap();
-                                       let (_, deserialized_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
+                                       let (_, deserialized_monitor) = <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(
                                                &mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager)).unwrap();
                                        deserialized_monitors.push(deserialized_monitor);
                                }
@@ -539,7 +539,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
                                        channel_monitors.insert(monitor.get_funding_txo().0, monitor);
                                }
 
-                               let scorer = Mutex::new(test_utils::TestScorer::new());
+                               let scorer = RwLock::new(test_utils::TestScorer::new());
                                let mut w = test_utils::TestVecWriter(Vec::new());
                                self.node.write(&mut w).unwrap();
                                <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>)>::read(&mut io::Cursor::new(w.0), ChannelManagerReadArgs {
@@ -570,11 +570,11 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
        }
 }
 
-pub fn create_chan_between_nodes<'a, 'b, 'c, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
+pub fn create_chan_between_nodes<'a, 'b, 'c: 'd, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, ChannelId, Transaction) {
        create_chan_between_nodes_with_value(node_a, node_b, 100000, 10001)
 }
 
-pub fn create_chan_between_nodes_with_value<'a, 'b, 'c, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, channel_value: u64, push_msat: u64) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
+pub fn create_chan_between_nodes_with_value<'a, 'b, 'c: 'd, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, channel_value: u64, push_msat: u64) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, ChannelId, Transaction) {
        let (channel_ready, channel_id, tx) = create_chan_between_nodes_with_value_a(node_a, node_b, channel_value, push_msat);
        let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(node_a, node_b, &channel_ready);
        (announcement, as_update, bs_update, channel_id, tx)
@@ -868,7 +868,7 @@ macro_rules! get_monitor {
                        for index in 0..2 {
                                if let Ok(mon) = $node.chain_monitor.chain_monitor.get_monitor(
                                        $crate::chain::transaction::OutPoint {
-                                               txid: bitcoin::Txid::from_slice(&$channel_id[..]).unwrap(), index
+                                               txid: bitcoin::Txid::from_slice(&$channel_id.0[..]).unwrap(), index
                                        })
                                {
                                        monitor = Some(mon);
@@ -952,7 +952,7 @@ pub fn _reload_node<'a, 'b, 'c>(node: &'a Node<'a, 'b, 'c>, default_config: User
        let mut monitors_read = Vec::with_capacity(monitors_encoded.len());
        for encoded in monitors_encoded {
                let mut monitor_read = &encoded[..];
-               let (_, monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>
+               let (_, monitor) = <(BlockHash, ChannelMonitor<TestChannelSigner>)>
                        ::read(&mut monitor_read, (node.keys_manager, node.keys_manager)).unwrap();
                assert!(monitor_read.is_empty());
                monitors_read.push(monitor);
@@ -1005,7 +1005,7 @@ macro_rules! reload_node {
        };
 }
 
-pub fn create_funding_transaction<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, expected_counterparty_node_id: &PublicKey, expected_chan_value: u64, expected_user_chan_id: u128) -> ([u8; 32], Transaction, OutPoint) {
+pub fn create_funding_transaction<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, expected_counterparty_node_id: &PublicKey, expected_chan_value: u64, expected_user_chan_id: u128) -> (ChannelId, Transaction, OutPoint) {
        let chan_id = *node.network_chan_count.borrow();
 
        let events = node.node.get_and_clear_pending_events();
@@ -1025,7 +1025,7 @@ pub fn create_funding_transaction<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, expected_
                _ => panic!("Unexpected event"),
        }
 }
-pub fn sign_funding_transaction<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, channel_value: u64, expected_temporary_channel_id: [u8; 32]) -> Transaction {
+pub fn sign_funding_transaction<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, channel_value: u64, expected_temporary_channel_id: ChannelId) -> Transaction {
        let (temporary_channel_id, tx, funding_output) = create_funding_transaction(node_a, &node_b.node.get_our_node_id(), channel_value, 42);
        assert_eq!(temporary_channel_id, expected_temporary_channel_id);
 
@@ -1068,7 +1068,7 @@ pub fn sign_funding_transaction<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &
 }
 
 // Receiver must have been initialized with manually_accept_inbound_channels set to true.
-pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>(initiator: &'a Node<'b, 'c, 'd>, receiver: &'a Node<'b, 'c, 'd>, initiator_config: Option<UserConfig>) -> (bitcoin::Transaction, [u8; 32]) {
+pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>(initiator: &'a Node<'b, 'c, 'd>, receiver: &'a Node<'b, 'c, 'd>, initiator_config: Option<UserConfig>) -> (bitcoin::Transaction, ChannelId) {
        let initiator_channels = initiator.node.list_usable_channels().len();
        let receiver_channels = receiver.node.list_usable_channels().len();
 
@@ -1166,7 +1166,7 @@ pub fn create_chan_between_nodes_with_value_confirm_first<'a, 'b, 'c, 'd>(node_r
        node_recv.node.handle_channel_ready(&node_conf.node.get_our_node_id(), &get_event_msg!(node_conf, MessageSendEvent::SendChannelReady, node_recv.node.get_our_node_id()));
 }
 
-pub fn create_chan_between_nodes_with_value_confirm_second<'a, 'b, 'c>(node_recv: &Node<'a, 'b, 'c>, node_conf: &Node<'a, 'b, 'c>) -> ((msgs::ChannelReady, msgs::AnnouncementSignatures), [u8; 32]) {
+pub fn create_chan_between_nodes_with_value_confirm_second<'a, 'b, 'c>(node_recv: &Node<'a, 'b, 'c>, node_conf: &Node<'a, 'b, 'c>) -> ((msgs::ChannelReady, msgs::AnnouncementSignatures), ChannelId) {
        let channel_id;
        let events_6 = node_conf.node.get_and_clear_pending_msg_events();
        assert_eq!(events_6.len(), 3);
@@ -1193,7 +1193,7 @@ pub fn create_chan_between_nodes_with_value_confirm_second<'a, 'b, 'c>(node_recv
        }), channel_id)
 }
 
-pub fn create_chan_between_nodes_with_value_confirm<'a, 'b, 'c, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, tx: &Transaction) -> ((msgs::ChannelReady, msgs::AnnouncementSignatures), [u8; 32]) {
+pub fn create_chan_between_nodes_with_value_confirm<'a, 'b, 'c: 'd, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, tx: &Transaction) -> ((msgs::ChannelReady, msgs::AnnouncementSignatures), ChannelId) {
        let conf_height = core::cmp::max(node_a.best_block_info().1 + 1, node_b.best_block_info().1 + 1);
        create_chan_between_nodes_with_value_confirm_first(node_a, node_b, tx, conf_height);
        confirm_transaction_at(node_a, tx, conf_height);
@@ -1202,7 +1202,7 @@ pub fn create_chan_between_nodes_with_value_confirm<'a, 'b, 'c, 'd>(node_a: &'a
        create_chan_between_nodes_with_value_confirm_second(node_b, node_a)
 }
 
-pub fn create_chan_between_nodes_with_value_a<'a, 'b, 'c, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, channel_value: u64, push_msat: u64) -> ((msgs::ChannelReady, msgs::AnnouncementSignatures), [u8; 32], Transaction) {
+pub fn create_chan_between_nodes_with_value_a<'a, 'b, 'c: 'd, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, channel_value: u64, push_msat: u64) -> ((msgs::ChannelReady, msgs::AnnouncementSignatures), ChannelId, Transaction) {
        let tx = create_chan_between_nodes_with_value_init(node_a, node_b, channel_value, push_msat);
        let (msgs, chan_id) = create_chan_between_nodes_with_value_confirm(node_a, node_b, &tx);
        (msgs, chan_id, tx)
@@ -1242,11 +1242,11 @@ pub fn create_chan_between_nodes_with_value_b<'a, 'b, 'c>(node_a: &Node<'a, 'b,
        ((*announcement).clone(), as_update, bs_update)
 }
 
-pub fn create_announced_chan_between_nodes<'a, 'b, 'c, 'd>(nodes: &'a Vec<Node<'b, 'c, 'd>>, a: usize, b: usize) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
+pub fn create_announced_chan_between_nodes<'a, 'b, 'c: 'd, 'd>(nodes: &'a Vec<Node<'b, 'c, 'd>>, a: usize, b: usize) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, ChannelId, Transaction) {
        create_announced_chan_between_nodes_with_value(nodes, a, b, 100000, 10001)
 }
 
-pub fn create_announced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>(nodes: &'a Vec<Node<'b, 'c, 'd>>, a: usize, b: usize, channel_value: u64, push_msat: u64) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
+pub fn create_announced_chan_between_nodes_with_value<'a, 'b, 'c: 'd, 'd>(nodes: &'a Vec<Node<'b, 'c, 'd>>, a: usize, b: usize, channel_value: u64, push_msat: u64) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, ChannelId, Transaction) {
        let chan_announcement = create_chan_between_nodes_with_value(&nodes[a], &nodes[b], channel_value, push_msat);
        update_nodes_with_chan_announce(nodes, a, b, &chan_announcement.0, &chan_announcement.1, &chan_announcement.2);
        (chan_announcement.1, chan_announcement.2, chan_announcement.3, chan_announcement.4)
@@ -1478,7 +1478,7 @@ macro_rules! check_closed_event {
        }
 }
 
-pub fn close_channel<'a, 'b, 'c>(outbound_node: &Node<'a, 'b, 'c>, inbound_node: &Node<'a, 'b, 'c>, channel_id: &[u8; 32], funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, Transaction) {
+pub fn close_channel<'a, 'b, 'c>(outbound_node: &Node<'a, 'b, 'c>, inbound_node: &Node<'a, 'b, 'c>, channel_id: &ChannelId, funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, Transaction) {
        let (node_a, broadcaster_a, struct_a) = if close_inbound_first { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) } else { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) };
        let (node_b, broadcaster_b, struct_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) } else { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) };
        let (tx_a, tx_b);
@@ -2634,7 +2634,7 @@ pub fn create_chanmon_cfgs(node_count: usize) -> Vec<TestChanMonCfg> {
                let persister = test_utils::TestPersister::new();
                let seed = [i as u8; 32];
                let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
-               let scorer = Mutex::new(test_utils::TestScorer::new());
+               let scorer = RwLock::new(test_utils::TestScorer::new());
 
                chan_mon_cfgs.push(TestChanMonCfg { tx_broadcaster, fee_estimator, chain_source, logger, persister, keys_manager, scorer });
        }
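
In this section the test scorer is wrapped in an RwLock and accessed via .read() rather than behind a Mutex. A standalone sketch (not LDK code) of that pattern: scoring lookups only need shared access, so any number of readers can proceed concurrently, while updates still take the exclusive write lock.

use std::sync::RwLock;

struct Scorer { penalty_msat: u64 }

fn main() {
	let scorer = RwLock::new(Scorer { penalty_msat: 0 });
	{
		// Shared access: any number of readers may hold this simultaneously.
		let s = scorer.read().unwrap();
		println!("penalty = {}", s.penalty_msat);
	}
	// Exclusive access: a single writer, blocking readers for its duration.
	scorer.write().unwrap().penalty_msat = 500;
}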
@@ -2646,7 +2646,7 @@ pub fn create_node_cfgs<'a>(node_count: usize, chanmon_cfgs: &'a Vec<TestChanMon
        create_node_cfgs_with_persisters(node_count, chanmon_cfgs, chanmon_cfgs.iter().map(|c| &c.persister).collect())
 }
 
-pub fn create_node_cfgs_with_persisters<'a>(node_count: usize, chanmon_cfgs: &'a Vec<TestChanMonCfg>, persisters: Vec<&'a impl Persist<EnforcingSigner>>) -> Vec<NodeCfg<'a>> {
+pub fn create_node_cfgs_with_persisters<'a>(node_count: usize, chanmon_cfgs: &'a Vec<TestChanMonCfg>, persisters: Vec<&'a impl Persist<TestChannelSigner>>) -> Vec<NodeCfg<'a>> {
        let mut nodes = Vec::new();
 
        for i in 0..node_count {
@@ -2766,7 +2766,7 @@ pub enum HTLCType { NONE, TIMEOUT, SUCCESS }
 ///
 /// All broadcast transactions must be accounted for in one of the above three types or we'll
 /// also fail.
-pub fn test_txn_broadcast<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, chan: &(msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction), commitment_tx: Option<Transaction>, has_htlc_tx: HTLCType) -> Vec<Transaction>  {
+pub fn test_txn_broadcast<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, chan: &(msgs::ChannelUpdate, msgs::ChannelUpdate, ChannelId, Transaction), commitment_tx: Option<Transaction>, has_htlc_tx: HTLCType) -> Vec<Transaction>  {
        let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
        let mut txn_seen = HashSet::new();
        node_txn.retain(|tx| txn_seen.insert(tx.txid()));
index 2fbc36ce9a5b365d1b548f2fa5038722ab1db625..d3401e5f0681a73c77157f9756169fe4c9229158 100644 (file)
@@ -19,7 +19,7 @@ use crate::chain::channelmonitor::{CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCK
 use crate::chain::transaction::OutPoint;
 use crate::sign::{ChannelSigner, EcdsaChannelSigner, EntropySource, SignerProvider};
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
-use crate::ln::{PaymentPreimage, PaymentSecret, PaymentHash};
+use crate::ln::{ChannelId, PaymentPreimage, PaymentSecret, PaymentHash};
 use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel};
 use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA};
 use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError};
@@ -30,7 +30,7 @@ use crate::routing::router::{Path, PaymentParameters, Route, RouteHop, get_route
 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
-use crate::util::enforcing_trait_impls::EnforcingSigner;
+use crate::util::test_channel_signer::TestChannelSigner;
 use crate::util::test_utils::{self, WatchtowerPersister};
 use crate::util::errors::APIError;
 use crate::util::ser::{Writeable, ReadableArgs};
@@ -56,7 +56,7 @@ use alloc::collections::BTreeSet;
 use core::default::Default;
 use core::iter::repeat;
 use bitcoin::hashes::Hash;
-use crate::sync::{Arc, Mutex};
+use crate::sync::{Arc, Mutex, RwLock};
 
 use crate::ln::functional_test_utils::*;
 use crate::ln::chan_utils::CommitmentTransaction;
@@ -696,7 +696,7 @@ fn test_update_fee_that_funder_cannot_afford() {
 
        const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654;
 
-       // Get the EnforcingSigner for each channel, which will be used to (1) get the keys
+       // Get the TestChannelSigner for each channel, which will be used to (1) get the keys
        // needed to sign the new commitment tx and (2) sign the new commitment tx.
        let (local_revocation_basepoint, local_htlc_basepoint, local_funding) = {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
@@ -1409,7 +1409,7 @@ fn test_fee_spike_violation_fails_htlc() {
 
        const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
 
-       // Get the EnforcingSigner for each channel, which will be used to (1) get the keys
+       // Get the TestChannelSigner for each channel, which will be used to (1) get the keys
        // needed to sign the new commitment tx and (2) sign the new commitment tx.
        let (local_revocation_basepoint, local_htlc_basepoint, local_secret, next_local_point, local_funding) = {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
@@ -1505,7 +1505,7 @@ fn test_fee_spike_violation_fails_htlc() {
                _ => panic!("Unexpected event"),
        };
        nodes[1].logger.assert_log("lightning::ln::channel".to_string(),
-               format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", ::hex::encode(raa_msg.channel_id)), 1);
+               format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", raa_msg.channel_id), 1);
 
        check_added_monitors!(nodes[1], 2);
 }
@@ -5434,7 +5434,7 @@ fn test_key_derivation_params() {
        let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
        let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &chanmon_cfgs[0].persister, &keys_manager);
        let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &chanmon_cfgs[0].logger));
-       let scorer = Mutex::new(test_utils::TestScorer::new());
+       let scorer = RwLock::new(test_utils::TestScorer::new());
        let router = test_utils::TestRouter::new(network_graph.clone(), &scorer);
        let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, router, chain_monitor, keys_manager: &keys_manager, network_graph, node_seed: seed, override_init_features: alloc::rc::Rc::new(core::cell::RefCell::new(None)) };
        let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
@@ -5835,7 +5835,7 @@ fn test_fail_holding_cell_htlc_upon_free() {
        // us to surface its failure to the user.
        chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
        assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
-       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 1 HTLC updates in channel {}", hex::encode(chan.2)), 1);
+       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 1 HTLC updates in channel {}", chan.2), 1);
 
        // Check that the payment failed to be sent out.
        let events = nodes[0].node.get_and_clear_pending_events();
@@ -5923,7 +5923,7 @@ fn test_free_and_fail_holding_cell_htlcs() {
        // to surface its failure to the user. The first payment should succeed.
        chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
        assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
-       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 2 HTLC updates in channel {}", hex::encode(chan.2)), 1);
+       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 2 HTLC updates in channel {}", chan.2), 1);
 
        // Check that the second payment failed to be sent out.
        let events = nodes[0].node.get_and_clear_pending_events();
@@ -7639,7 +7639,7 @@ fn test_counterparty_raa_skip_no_crash() {
        // commitment transaction, we would have happily carried on and provided them the next
        // commitment transaction based on one RAA forward. This would probably eventually have led to
        // channel closure, but it would not have resulted in funds loss. Still, our
-       // EnforcingSigner would have panicked as it doesn't like jumps into the future. Here, we
+       // TestChannelSigner would have panicked as it doesn't like jumps into the future. Here, we
        // check simply that the channel is closed in response to such an RAA, but don't check whether
        // we decide to punish our counterparty for revoking their funds (as we don't currently
        // implement that).
@@ -8003,7 +8003,7 @@ fn test_can_not_accept_unknown_inbound_channel() {
        let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
        let nodes = create_network(2, &node_cfg, &node_chanmgr);
 
-       let unknown_channel_id = [0; 32];
+       let unknown_channel_id = ChannelId::new_zero();
        let api_res = nodes[0].node.accept_inbound_channel(&unknown_channel_id, &nodes[1].node.get_our_node_id(), 0);
        match api_res {
                Err(APIError::APIMisuseError { err }) => {
@@ -8350,7 +8350,7 @@ fn test_update_err_monitor_lockdown() {
        let watchtower = {
                let new_monitor = {
                        let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
-                       let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
+                       let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
                                        &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
                        assert!(new_monitor == *monitor);
                        new_monitor
@@ -8420,7 +8420,7 @@ fn test_concurrent_monitor_claim() {
        let watchtower_alice = {
                let new_monitor = {
                        let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
-                       let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
+                       let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
                                        &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
                        assert!(new_monitor == *monitor);
                        new_monitor
@@ -8451,7 +8451,7 @@ fn test_concurrent_monitor_claim() {
        let watchtower_bob = {
                let new_monitor = {
                        let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
-                       let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
+                       let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
                                        &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
                        assert!(new_monitor == *monitor);
                        new_monitor
@@ -9017,7 +9017,7 @@ fn test_error_chans_closed() {
 
        // A null channel ID should close all channels
        let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
-       nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: [0; 32], data: "ERR".to_owned() });
+       nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: ChannelId::new_zero(), data: "ERR".to_owned() });
        check_added_monitors!(nodes[0], 2);
        check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) },
                [nodes[1].node.get_our_node_id(); 2], 100000);
index a5de0d4c2f5d77a70d254cca647808c77563ac50..bacb9e86c791bf61dee44246751c4f85c0e0f366 100644 (file)
@@ -13,6 +13,7 @@
 #[macro_use]
 pub mod functional_test_utils;
 
+pub mod channel_id;
 pub mod channelmanager;
 pub mod inbound_payment;
 pub mod msgs;
@@ -31,6 +32,9 @@ pub mod channel;
 #[cfg(not(fuzzing))]
 pub(crate) mod channel;
 
+// Re-export ChannelId
+pub use self::channel_id::ChannelId;
+
 pub(crate) mod onion_utils;
 mod outbound_payment;
 pub mod wire;
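
The channel_id module itself is not shown in this diff. For orientation only, a minimal sketch of a ChannelId newtype, inferred from the calls visible in this section (from_bytes, new_zero, is_zero, and Display-based log formatting); the actual implementation may differ.

use core::fmt;

/// Sketch only: a 32-byte channel identifier newtype.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub struct ChannelId(pub [u8; 32]);

impl ChannelId {
	/// Wraps raw bytes, as the ChannelId::from_bytes([2; 32]) test vectors below do.
	pub fn from_bytes(data: [u8; 32]) -> Self { Self(data) }
	/// The all-zeros id, used for errors and warnings not tied to a specific channel.
	pub fn new_zero() -> Self { Self([0; 32]) }
	/// True for the all-zeros id.
	pub fn is_zero(&self) -> bool { self.0 == [0; 32] }
}

/// Assumption: Display renders lowercase hex, which is what lets the log format
/// strings drop their explicit hex::encode and log_bytes! calls.
impl fmt::Display for ChannelId {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		for b in self.0.iter() { write!(f, "{:02x}", b)?; }
		Ok(())
	}
}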
index 728752f97c310bc478abc213d165fc347a2db407..58878a9998c8a3b5aa2d87b8e14bc294c4574335 100644 (file)
@@ -2191,7 +2191,7 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
 
        // Alice should see that Bob is trying to claim the HTLCs, so she should now try to claim them at
        // the second level instead.
-       let revoked_claims = {
+       let revoked_claim_transactions = {
                let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
                assert_eq!(txn.len(), 2);
 
@@ -2205,10 +2205,14 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
                        check_spends!(revoked_htlc_claim, htlc_tx);
                }
 
-               txn
+               let mut revoked_claim_transaction_map = HashMap::new();
+               for current_tx in txn.into_iter() {
+                       revoked_claim_transaction_map.insert(current_tx.txid(), current_tx);
+               }
+               revoked_claim_transaction_map
        };
        for node in &nodes {
-               mine_transactions(node, &revoked_claims.iter().collect::<Vec<_>>());
+               mine_transactions(node, &revoked_claim_transactions.values().collect::<Vec<_>>());
        }
 
 
@@ -2234,7 +2238,8 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
                        let spend_tx = nodes[0].keys_manager.backing.spend_spendable_outputs(
                                &[&outputs[0]], Vec::new(), Script::new_op_return(&[]), 253, None, &Secp256k1::new(),
                        ).unwrap();
-                       check_spends!(spend_tx, revoked_claims[idx]);
+
+                       check_spends!(spend_tx, revoked_claim_transactions.get(&spend_tx.input[0].previous_output.txid).unwrap());
                } else {
                        panic!("unexpected event");
                }
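
The revoked claim transactions above are collected into a HashMap keyed by txid, so check_spends can look up exactly the transaction a spendable output came from, independent of broadcast order. A standalone sketch of that pattern, assuming the bitcoin crate's Transaction and Txid types:

use std::collections::HashMap;
use bitcoin::{Transaction, Txid};

/// Index a batch of broadcast transactions by their txid for later lookup.
fn index_by_txid(txn: Vec<Transaction>) -> HashMap<Txid, Transaction> {
	txn.into_iter().map(|tx| (tx.txid(), tx)).collect()
}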
index dc8d9215438e09a4eb505a4106f15083003519bc..89ded2168bcc33bd8d4c1622dab6986231967671 100644 (file)
@@ -31,6 +31,7 @@ use bitcoin::{secp256k1, Witness};
 use bitcoin::blockdata::script::Script;
 use bitcoin::hash_types::{Txid, BlockHash};
 
+use crate::ln::{ChannelId, PaymentPreimage, PaymentHash, PaymentSecret};
 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
 use crate::ln::onion_utils;
 use crate::onion_message;
@@ -45,8 +46,6 @@ use crate::events::{MessageSendEventsProvider, OnionMessageProvider};
 use crate::util::logger;
 use crate::util::ser::{LengthReadable, Readable, ReadableArgs, Writeable, Writer, WithoutLength, FixedLengthReader, HighZeroBytesDroppedBigSize, Hostname, TransactionU16LenLimited, BigSize};
 
-use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
-
 use crate::routing::gossip::{NodeAlias, NodeId};
 
 /// 21 million * 10^8 * 1000
@@ -111,7 +110,7 @@ pub struct ErrorMessage {
        ///
        /// All-0s indicates a general error unrelated to a specific channel, after which all channels
        /// with the sending peer should be closed.
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// A possibly human-readable error description.
        ///
        /// The string should be sanitized before it is used (e.g., emitted to logs or printed to
@@ -128,7 +127,7 @@ pub struct WarningMessage {
        /// The channel ID involved in the warning.
        ///
        /// All-0s indicates a warning unrelated to a specific channel.
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// A possibly human-readable warning description.
        ///
        /// The string should be sanitized before it is used (e.g. emitted to logs or printed to
@@ -171,7 +170,7 @@ pub struct OpenChannel {
        /// The genesis hash of the blockchain where the channel is to be opened
        pub chain_hash: BlockHash,
        /// A temporary channel ID, until the funding outpoint is announced
-       pub temporary_channel_id: [u8; 32],
+       pub temporary_channel_id: ChannelId,
        /// The channel value
        pub funding_satoshis: u64,
        /// The amount to push to the counterparty as part of the open, in milli-satoshi
@@ -225,7 +224,7 @@ pub struct OpenChannelV2 {
        /// The genesis hash of the blockchain where the channel is to be opened
        pub chain_hash: BlockHash,
        /// A temporary channel ID derived using a zeroed out value for the channel acceptor's revocation basepoint
-       pub temporary_channel_id: [u8; 32],
+       pub temporary_channel_id: ChannelId,
        /// The feerate for the funding transaction set by the channel initiator
        pub funding_feerate_sat_per_1000_weight: u32,
        /// The feerate for the commitment transaction set by the channel initiator
@@ -282,7 +281,7 @@ pub struct OpenChannelV2 {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct AcceptChannel {
        /// A temporary channel ID, until the funding outpoint is announced
-       pub temporary_channel_id: [u8; 32],
+       pub temporary_channel_id: ChannelId,
        /// The threshold below which outputs on transactions broadcast by sender will be omitted
        pub dust_limit_satoshis: u64,
        /// The maximum inbound HTLC value in flight towards sender, in milli-satoshi
@@ -330,7 +329,7 @@ pub struct AcceptChannel {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct AcceptChannelV2 {
        /// The same `temporary_channel_id` received from the initiator's `open_channel2` message.
-       pub temporary_channel_id: [u8; 32],
+       pub temporary_channel_id: ChannelId,
        /// Part of the channel value contributed by the channel acceptor
        pub funding_satoshis: u64,
        /// The threshold below which outputs on transactions broadcast by the channel acceptor will be
@@ -383,7 +382,7 @@ pub struct AcceptChannelV2 {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct FundingCreated {
        /// A temporary channel ID, until the funding is established
-       pub temporary_channel_id: [u8; 32],
+       pub temporary_channel_id: ChannelId,
        /// The funding transaction ID
        pub funding_txid: Txid,
        /// The specific output index funding this channel
@@ -406,7 +405,7 @@ pub struct FundingCreated {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct FundingSigned {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The signature of the channel acceptor (fundee) on the initial commitment transaction
        pub signature: Signature,
        #[cfg(taproot)]
@@ -420,7 +419,7 @@ pub struct FundingSigned {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct ChannelReady {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The per-commitment point of the second commitment transaction
        pub next_per_commitment_point: PublicKey,
        /// If set, provides a `short_channel_id` alias for this channel.
@@ -436,7 +435,7 @@ pub struct ChannelReady {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct TxAddInput {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// A randomly chosen unique identifier for this input, which is even for initiators and odd for
        /// non-initiators.
        pub serial_id: u64,
@@ -455,7 +454,7 @@ pub struct TxAddInput {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct TxAddOutput {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// A randomly chosen unique identifier for this output, which is even for initiators and odd for
        /// non-initiators.
        pub serial_id: u64,
@@ -471,7 +470,7 @@ pub struct TxAddOutput {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct TxRemoveInput {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The serial ID of the input to be removed
        pub serial_id: u64,
 }
@@ -482,7 +481,7 @@ pub struct TxRemoveInput {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct TxRemoveOutput {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The serial ID of the output to be removed
        pub serial_id: u64,
 }
@@ -494,7 +493,7 @@ pub struct TxRemoveOutput {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct TxComplete {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
 }
 
 /// A tx_signatures message containing the sender's signatures for a transaction constructed with
@@ -504,7 +503,7 @@ pub struct TxComplete {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct TxSignatures {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The TXID
        pub tx_hash: Txid,
        /// The list of witnesses
@@ -518,7 +517,7 @@ pub struct TxSignatures {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct TxInitRbf {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The locktime of the transaction
        pub locktime: u32,
        /// The feerate of the transaction
@@ -535,7 +534,7 @@ pub struct TxInitRbf {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct TxAckRbf {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The number of satoshis the sender will contribute to or, if negative, remove from
        /// (e.g. splice-out) the funding output of the transaction
        pub funding_output_contribution: Option<i64>,
@@ -547,7 +546,7 @@ pub struct TxAckRbf {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct TxAbort {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// Message data
        pub data: Vec<u8>,
 }
@@ -558,7 +557,7 @@ pub struct TxAbort {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct Shutdown {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The destination of this peer's funds on closing.
        ///
        /// Must be in one of these forms: P2PKH, P2SH, P2WPKH, P2WSH, P2TR.
@@ -585,7 +584,7 @@ pub struct ClosingSignedFeeRange {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct ClosingSigned {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The proposed total fee for the closing transaction
        pub fee_satoshis: u64,
        /// A signature on the closing transaction
@@ -601,7 +600,7 @@ pub struct ClosingSigned {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct UpdateAddHTLC {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The HTLC ID
        pub htlc_id: u64,
        /// The HTLC value in milli-satoshi
@@ -634,7 +633,7 @@ pub struct OnionMessage {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct UpdateFulfillHTLC {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The HTLC ID
        pub htlc_id: u64,
        /// The pre-image of the payment hash, allowing HTLC redemption
@@ -647,7 +646,7 @@ pub struct UpdateFulfillHTLC {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct UpdateFailHTLC {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The HTLC ID
        pub htlc_id: u64,
        pub(crate) reason: OnionErrorPacket,
@@ -659,7 +658,7 @@ pub struct UpdateFailHTLC {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct UpdateFailMalformedHTLC {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The HTLC ID
        pub htlc_id: u64,
        pub(crate) sha256_of_onion: [u8; 32],
@@ -673,7 +672,7 @@ pub struct UpdateFailMalformedHTLC {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct CommitmentSigned {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// A signature on the commitment transaction
        pub signature: Signature,
        /// Signatures on the HTLC transactions
@@ -689,7 +688,7 @@ pub struct CommitmentSigned {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct RevokeAndACK {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The secret corresponding to the per-commitment point
        pub per_commitment_secret: [u8; 32],
        /// The next sender-broadcast commitment transaction's per-commitment point
@@ -705,7 +704,7 @@ pub struct RevokeAndACK {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct UpdateFee {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// Fee rate per 1000-weight of the transaction
        pub feerate_per_kw: u32,
 }
@@ -716,7 +715,7 @@ pub struct UpdateFee {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct ChannelReestablish {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The next commitment number for the sender
        pub next_local_commitment_number: u64,
        /// The next commitment number for the recipient
@@ -736,7 +735,7 @@ pub struct ChannelReestablish {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct AnnouncementSignatures {
        /// The channel ID
-       pub channel_id: [u8; 32],
+       pub channel_id: ChannelId,
        /// The short channel ID
        pub short_channel_id: u64,
        /// A signature by the node key
@@ -2476,6 +2475,7 @@ mod tests {
        use bitcoin::{Transaction, PackedLockTime, TxIn, Script, Sequence, Witness, TxOut};
        use hex;
        use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
+       use crate::ln::ChannelId;
        use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
        use crate::ln::msgs::{self, FinalOnionHopData, OnionErrorPacket};
        use crate::routing::gossip::{NodeAlias, NodeId};
@@ -2506,7 +2506,7 @@ mod tests {
                };
 
                let cr = msgs::ChannelReestablish {
-                       channel_id: [4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0],
+                       channel_id: ChannelId::from_bytes([4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0]),
                        next_local_commitment_number: 3,
                        next_remote_commitment_number: 4,
                        your_last_per_commitment_secret: [9;32],
@@ -2535,7 +2535,7 @@ mod tests {
                };
 
                let cr = msgs::ChannelReestablish {
-                       channel_id: [4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0],
+                       channel_id: ChannelId::from_bytes([4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0]),
                        next_local_commitment_number: 3,
                        next_remote_commitment_number: 4,
                        your_last_per_commitment_secret: [9;32],
@@ -2587,7 +2587,7 @@ mod tests {
                let sig_1 = get_sig_on!(privkey, secp_ctx, String::from("01010101010101010101010101010101"));
                let sig_2 = get_sig_on!(privkey, secp_ctx, String::from("02020202020202020202020202020202"));
                let announcement_signatures = msgs::AnnouncementSignatures {
-                       channel_id: [4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0],
+                       channel_id: ChannelId::from_bytes([4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0]),
                        short_channel_id: 2316138423780173,
                        node_signature: sig_1,
                        bitcoin_signature: sig_2,
@@ -2823,7 +2823,7 @@ mod tests {
                let (_, pubkey_6) = get_keys_from!("0606060606060606060606060606060606060606060606060606060606060606", secp_ctx);
                let open_channel = msgs::OpenChannel {
                        chain_hash: BlockHash::from_hex("6fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000").unwrap(),
-                       temporary_channel_id: [2; 32],
+                       temporary_channel_id: ChannelId::from_bytes([2; 32]),
                        funding_satoshis: 1311768467284833366,
                        push_msat: 2536655962884945560,
                        dust_limit_satoshis: 3608586615801332854,
@@ -2884,7 +2884,7 @@ mod tests {
                let (_, pubkey_7) = get_keys_from!("0707070707070707070707070707070707070707070707070707070707070707", secp_ctx);
                let open_channelv2 = msgs::OpenChannelV2 {
                        chain_hash: BlockHash::from_hex("6fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000").unwrap(),
-                       temporary_channel_id: [2; 32],
+                       temporary_channel_id: ChannelId::from_bytes([2; 32]),
                        funding_feerate_sat_per_1000_weight: 821716,
                        commitment_feerate_sat_per_1000_weight: 821716,
                        funding_satoshis: 1311768467284833366,
@@ -2974,7 +2974,7 @@ mod tests {
                let (_, pubkey_5) = get_keys_from!("0505050505050505050505050505050505050505050505050505050505050505", secp_ctx);
                let (_, pubkey_6) = get_keys_from!("0606060606060606060606060606060606060606060606060606060606060606", secp_ctx);
                let accept_channel = msgs::AcceptChannel {
-                       temporary_channel_id: [2; 32],
+                       temporary_channel_id: ChannelId::from_bytes([2; 32]),
                        dust_limit_satoshis: 1311768467284833366,
                        max_htlc_value_in_flight_msat: 2536655962884945560,
                        channel_reserve_satoshis: 3608586615801332854,
@@ -3017,7 +3017,7 @@ mod tests {
                let (_, pubkey_6) = get_keys_from!("0606060606060606060606060606060606060606060606060606060606060606", secp_ctx);
                let (_, pubkey_7) = get_keys_from!("0707070707070707070707070707070707070707070707070707070707070707", secp_ctx);
                let accept_channelv2 = msgs::AcceptChannelV2 {
-                       temporary_channel_id: [2; 32],
+                       temporary_channel_id: ChannelId::from_bytes([2; 32]),
                        funding_satoshis: 1311768467284833366,
                        dust_limit_satoshis: 1311768467284833366,
                        max_htlc_value_in_flight_msat: 2536655962884945560,
@@ -3071,7 +3071,7 @@ mod tests {
                let (privkey_1, _) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx);
                let sig_1 = get_sig_on!(privkey_1, secp_ctx, String::from("01010101010101010101010101010101"));
                let funding_created = msgs::FundingCreated {
-                       temporary_channel_id: [2; 32],
+                       temporary_channel_id: ChannelId::from_bytes([2; 32]),
                        funding_txid: Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap(),
                        funding_output_index: 255,
                        signature: sig_1,
@@ -3091,7 +3091,7 @@ mod tests {
                let (privkey_1, _) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx);
                let sig_1 = get_sig_on!(privkey_1, secp_ctx, String::from("01010101010101010101010101010101"));
                let funding_signed = msgs::FundingSigned {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        signature: sig_1,
                        #[cfg(taproot)]
                        partial_signature_with_nonce: None,
@@ -3106,7 +3106,7 @@ mod tests {
                let secp_ctx = Secp256k1::new();
                let (_, pubkey_1,) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx);
                let channel_ready = msgs::ChannelReady {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        next_per_commitment_point: pubkey_1,
                        short_channel_id_alias: None,
                };
@@ -3118,7 +3118,7 @@ mod tests {
        #[test]
        fn encoding_tx_add_input() {
                let tx_add_input = msgs::TxAddInput {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        serial_id: 4886718345,
                        prevtx: TransactionU16LenLimited::new(Transaction {
                                version: 2,
@@ -3153,7 +3153,7 @@ mod tests {
        #[test]
        fn encoding_tx_add_output() {
                let tx_add_output = msgs::TxAddOutput {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        serial_id: 4886718345,
                        sats: 4886718345,
                        script: Address::from_str("bc1qxmk834g5marzm227dgqvynd23y2nvt2ztwcw2z").unwrap().script_pubkey(),
@@ -3166,7 +3166,7 @@ mod tests {
        #[test]
        fn encoding_tx_remove_input() {
                let tx_remove_input = msgs::TxRemoveInput {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        serial_id: 4886718345,
                };
                let encoded_value = tx_remove_input.encode();
@@ -3177,7 +3177,7 @@ mod tests {
        #[test]
        fn encoding_tx_remove_output() {
                let tx_remove_output = msgs::TxRemoveOutput {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        serial_id: 4886718345,
                };
                let encoded_value = tx_remove_output.encode();
@@ -3188,7 +3188,7 @@ mod tests {
        #[test]
        fn encoding_tx_complete() {
                let tx_complete = msgs::TxComplete {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                };
                let encoded_value = tx_complete.encode();
                let target_value = hex::decode("0202020202020202020202020202020202020202020202020202020202020202").unwrap();
@@ -3198,7 +3198,7 @@ mod tests {
        #[test]
        fn encoding_tx_signatures() {
                let tx_signatures = msgs::TxSignatures {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        tx_hash: Txid::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap(),
                        witnesses: vec![
                                Witness::from_vec(vec![
@@ -3232,7 +3232,7 @@ mod tests {
 
        fn do_encoding_tx_init_rbf(funding_value_with_hex_target: Option<(i64, &str)>) {
                let tx_init_rbf = msgs::TxInitRbf {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        locktime: 305419896,
                        feerate_sat_per_1000_weight: 20190119,
                        funding_output_contribution: if let Some((value, _)) = funding_value_with_hex_target { Some(value) } else { None },
@@ -3258,7 +3258,7 @@ mod tests {
 
        fn do_encoding_tx_ack_rbf(funding_value_with_hex_target: Option<(i64, &str)>) {
                let tx_ack_rbf = msgs::TxAckRbf {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        funding_output_contribution: if let Some((value, _)) = funding_value_with_hex_target { Some(value) } else { None },
                };
                let encoded_value = tx_ack_rbf.encode();
@@ -3281,7 +3281,7 @@ mod tests {
        #[test]
        fn encoding_tx_abort() {
                let tx_abort = msgs::TxAbort {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        data: hex::decode("54686520717569636B2062726F776E20666F78206A756D7073206F76657220746865206C617A7920646F672E").unwrap(),
                };
                let encoded_value = tx_abort.encode();
@@ -3294,7 +3294,7 @@ mod tests {
                let (_, pubkey_1) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx);
                let script = Builder::new().push_opcode(opcodes::OP_TRUE).into_script();
                let shutdown = msgs::Shutdown {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        scriptpubkey:
                                     if script_type == 1 { Address::p2pkh(&::bitcoin::PublicKey{compressed: true, inner: pubkey_1}, Network::Testnet).script_pubkey() }
                                else if script_type == 2 { Address::p2sh(&script, Network::Testnet).unwrap().script_pubkey() }
@@ -3329,7 +3329,7 @@ mod tests {
                let (privkey_1, _) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx);
                let sig_1 = get_sig_on!(privkey_1, secp_ctx, String::from("01010101010101010101010101010101"));
                let closing_signed = msgs::ClosingSigned {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        fee_satoshis: 2316138423780173,
                        signature: sig_1,
                        fee_range: None,
@@ -3340,7 +3340,7 @@ mod tests {
                assert_eq!(msgs::ClosingSigned::read(&mut Cursor::new(&target_value)).unwrap(), closing_signed);
 
                let closing_signed_with_range = msgs::ClosingSigned {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        fee_satoshis: 2316138423780173,
                        signature: sig_1,
                        fee_range: Some(msgs::ClosingSignedFeeRange {
@@ -3366,7 +3366,7 @@ mod tests {
                        hmac: [2; 32]
                };
                let update_add_htlc = msgs::UpdateAddHTLC {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        htlc_id: 2316138423780173,
                        amount_msat: 3608586615801332854,
                        payment_hash: PaymentHash([1; 32]),
@@ -3382,7 +3382,7 @@ mod tests {
        #[test]
        fn encoding_update_fulfill_htlc() {
                let update_fulfill_htlc = msgs::UpdateFulfillHTLC {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        htlc_id: 2316138423780173,
                        payment_preimage: PaymentPreimage([1; 32]),
                };
@@ -3397,7 +3397,7 @@ mod tests {
                        data: [1; 32].to_vec(),
                };
                let update_fail_htlc = msgs::UpdateFailHTLC {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        htlc_id: 2316138423780173,
                        reason
                };
@@ -3409,7 +3409,7 @@ mod tests {
        #[test]
        fn encoding_update_fail_malformed_htlc() {
                let update_fail_malformed_htlc = msgs::UpdateFailMalformedHTLC {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        htlc_id: 2316138423780173,
                        sha256_of_onion: [1; 32],
                        failure_code: 255
@@ -3430,7 +3430,7 @@ mod tests {
                let sig_3 = get_sig_on!(privkey_3, secp_ctx, String::from("01010101010101010101010101010101"));
                let sig_4 = get_sig_on!(privkey_4, secp_ctx, String::from("01010101010101010101010101010101"));
                let commitment_signed = msgs::CommitmentSigned {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        signature: sig_1,
                        htlc_signatures: if htlcs { vec![sig_2, sig_3, sig_4] } else { Vec::new() },
                        #[cfg(taproot)]
@@ -3457,7 +3457,7 @@ mod tests {
                let secp_ctx = Secp256k1::new();
                let (_, pubkey_1) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx);
                let raa = msgs::RevokeAndACK {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        per_commitment_secret: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                        next_per_commitment_point: pubkey_1,
                        #[cfg(taproot)]
@@ -3471,7 +3471,7 @@ mod tests {
        #[test]
        fn encoding_update_fee() {
                let update_fee = msgs::UpdateFee {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        feerate_per_kw: 20190119,
                };
                let encoded_value = update_fee.encode();
@@ -3518,7 +3518,7 @@ mod tests {
        #[test]
        fn encoding_error() {
                let error = msgs::ErrorMessage {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        data: String::from("rust-lightning"),
                };
                let encoded_value = error.encode();
@@ -3529,7 +3529,7 @@ mod tests {
        #[test]
        fn encoding_warning() {
                let error = msgs::WarningMessage {
-                       channel_id: [2; 32],
+                       channel_id: ChannelId::from_bytes([2; 32]),
                        data: String::from("rust-lightning"),
                };
                let encoded_value = error.encode();
index 5b18acfe3b34c2779f3199435b213f1117df79f4..f60bf565efa35ddadac28416f2a7da22233d4892 100644 (file)
@@ -722,6 +722,8 @@ impl OutboundPayments {
        {
                #[cfg(feature = "std")] {
                        if has_expired(&route_params) {
+                               log_error!(logger, "Payment with id {} and hash {} had expired before we started paying",
+                                       payment_id, payment_hash);
                                return Err(RetryableSendFailure::PaymentExpired)
                        }
                }
@@ -730,16 +732,25 @@ impl OutboundPayments {
                        &node_signer.get_node_id(Recipient::Node).unwrap(), &route_params,
                        Some(&first_hops.iter().collect::<Vec<_>>()), inflight_htlcs(),
                        payment_hash, payment_id,
-               ).map_err(|_| RetryableSendFailure::RouteNotFound)?;
+               ).map_err(|_| {
+                       log_error!(logger, "Failed to find route for payment with id {} and hash {}",
+                               payment_id, payment_hash);
+                       RetryableSendFailure::RouteNotFound
+               })?;
 
                let onion_session_privs = self.add_new_pending_payment(payment_hash,
                        recipient_onion.clone(), payment_id, keysend_preimage, &route, Some(retry_strategy),
                        Some(route_params.payment_params.clone()), entropy_source, best_block_height)
-                       .map_err(|_| RetryableSendFailure::DuplicatePayment)?;
+                       .map_err(|_| {
+                               log_error!(logger, "Payment with id {} is already pending. New payment had payment hash {}",
+                                       payment_id, payment_hash);
+                               RetryableSendFailure::DuplicatePayment
+                       })?;
 
                let res = self.pay_route_internal(&route, payment_hash, recipient_onion, keysend_preimage, payment_id, None,
                        onion_session_privs, node_signer, best_block_height, &send_payment_along_path);
-               log_info!(logger, "Result sending payment with id {}: {:?}", &payment_id, res);
+               log_info!(logger, "Sending payment with id {} and hash {} returned {:?}",
+                       payment_id, payment_hash, res);
                if let Err(e) = res {
                        self.handle_pay_route_err(e, payment_id, payment_hash, route, route_params, router, first_hops, &inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, &send_payment_along_path);
                }
@@ -1188,6 +1199,7 @@ impl OutboundPayments {
                if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) {
                        if !payment.get().is_fulfilled() {
                                let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
+                               log_info!(logger, "Payment with id {} and hash {} sent!", payment_id, payment_hash);
                                let fee_paid_msat = payment.get().get_pending_fee_msat();
                                pending_events.push_back((events::Event::PaymentSent {
                                        payment_id: Some(payment_id),
@@ -1504,7 +1516,7 @@ mod tests {
        use crate::ln::outbound_payment::{OutboundPayments, Retry, RetryableSendFailure};
        use crate::routing::gossip::NetworkGraph;
        use crate::routing::router::{InFlightHtlcs, Path, PaymentParameters, Route, RouteHop, RouteParameters};
-       use crate::sync::{Arc, Mutex};
+       use crate::sync::{Arc, Mutex, RwLock};
        use crate::util::errors::APIError;
        use crate::util::test_utils;
 
@@ -1543,7 +1555,7 @@ mod tests {
                let outbound_payments = OutboundPayments::new();
                let logger = test_utils::TestLogger::new();
                let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
-               let scorer = Mutex::new(test_utils::TestScorer::new());
+               let scorer = RwLock::new(test_utils::TestScorer::new());
                let router = test_utils::TestRouter::new(network_graph, &scorer);
                let secp_ctx = Secp256k1::new();
                let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
@@ -1590,7 +1602,7 @@ mod tests {
                let outbound_payments = OutboundPayments::new();
                let logger = test_utils::TestLogger::new();
                let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
-               let scorer = Mutex::new(test_utils::TestScorer::new());
+               let scorer = RwLock::new(test_utils::TestScorer::new());
                let router = test_utils::TestRouter::new(network_graph, &scorer);
                let secp_ctx = Secp256k1::new();
                let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
@@ -1632,7 +1644,7 @@ mod tests {
                let outbound_payments = OutboundPayments::new();
                let logger = test_utils::TestLogger::new();
                let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
-               let scorer = Mutex::new(test_utils::TestScorer::new());
+               let scorer = RwLock::new(test_utils::TestScorer::new());
                let router = test_utils::TestRouter::new(network_graph, &scorer);
                let secp_ctx = Secp256k1::new();
                let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
index c266285713378648fffaebc7606ed22e154b1dcf..f0c3fa92d37c4b07f3c40d641e011e70d8b87c2e 100644 (file)
@@ -19,7 +19,7 @@ use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, Mes
 use crate::ln::channel::EXPIRE_PREV_CONFIG_TICKS;
 use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure, IDEMPOTENCY_TIMEOUT_TICKS, RecentPaymentDetails, RecipientOnionFields, HTLCForwardInfo, PendingHTLCRouting, PendingAddHTLCInfo};
 use crate::ln::features::Bolt11InvoiceFeatures;
-use crate::ln::{msgs, PaymentSecret, PaymentPreimage};
+use crate::ln::{msgs, ChannelId, PaymentSecret, PaymentPreimage};
 use crate::ln::msgs::ChannelMessageHandler;
 use crate::ln::outbound_payment::Retry;
 use crate::routing::gossip::{EffectiveCapacity, RoutingFees};
@@ -1703,7 +1703,7 @@ fn do_test_intercepted_payment(test: InterceptTest) {
        };
 
        // Check for unknown channel id error.
-       let unknown_chan_id_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &[42; 32], nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err();
+       let unknown_chan_id_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &ChannelId::from_bytes([42; 32]), nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err();
        assert_eq!(unknown_chan_id_err , APIError::ChannelUnavailable  {
                err: format!("Funded channel with id {} not found for the passed counterparty node_id {}. Channel may still be opening.",
                        log_bytes!([42; 32]), nodes[2].node.get_our_node_id()) });
@@ -1732,7 +1732,7 @@ fn do_test_intercepted_payment(test: InterceptTest) {
                let unusable_chan_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &temp_chan_id, nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err();
                assert_eq!(unusable_chan_err , APIError::ChannelUnavailable {
                        err: format!("Funded channel with id {} not found for the passed counterparty node_id {}. Channel may still be opening.",
-                               log_bytes!(temp_chan_id), nodes[2].node.get_our_node_id()) });
+                               &temp_chan_id, nodes[2].node.get_our_node_id()) });
                assert_eq!(nodes[1].node.get_and_clear_pending_msg_events().len(), 1);
 
                // Open the just-in-time channel so the payment can then be forwarded.
@@ -2544,7 +2544,7 @@ fn retry_multi_path_single_failed_payment() {
                }, Ok(route.clone()));
 
        {
-               let scorer = chanmon_cfgs[0].scorer.lock().unwrap();
+               let scorer = chanmon_cfgs[0].scorer.read().unwrap();
                // The initial send attempt, 2 paths
                scorer.expect_usage(chans[0].short_channel_id.unwrap(), ChannelUsage { amount_msat: 10_000, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown });
                scorer.expect_usage(chans[1].short_channel_id.unwrap(), ChannelUsage { amount_msat: 100_000_001, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown });
index 1a39bbb3ae408e7047159e6d9f3f63267188b61f..7565246fe3473fb8b5a78b345436b93ffd357231 100644 (file)
@@ -20,6 +20,7 @@ use bitcoin::secp256k1::{self, Secp256k1, SecretKey, PublicKey};
 
 use crate::sign::{KeysManager, NodeSigner, Recipient};
 use crate::events::{MessageSendEvent, MessageSendEventsProvider, OnionMessageProvider};
+use crate::ln::ChannelId;
 use crate::ln::features::{InitFeatures, NodeFeatures};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, LightningError, NetAddress, OnionMessageHandler, RoutingMessageHandler};
@@ -186,7 +187,7 @@ impl ErroringMessageHandler {
        pub fn new() -> Self {
                Self { message_queue: Mutex::new(Vec::new()) }
        }
-       fn push_error(&self, node_id: &PublicKey, channel_id: [u8; 32]) {
+       fn push_error(&self, node_id: &PublicKey, channel_id: ChannelId) {
                self.message_queue.lock().unwrap().push(MessageSendEvent::HandleError {
                        action: msgs::ErrorAction::SendErrorMessage {
                                msg: msgs::ErrorMessage { channel_id, data: "We do not support channel messages, sorry.".to_owned() },
@@ -1420,13 +1421,13 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                                                }
                                                                                                (msgs::DecodeError::UnsupportedCompression, _) => {
                                                                                                        log_gossip!(self.logger, "We don't support zlib-compressed message fields, sending a warning and ignoring message");
-                                                                                                       self.enqueue_message(peer, &msgs::WarningMessage { channel_id: [0; 32], data: "Unsupported message compression: zlib".to_owned() });
+                                                                                                       self.enqueue_message(peer, &msgs::WarningMessage { channel_id: ChannelId::new_zero(), data: "Unsupported message compression: zlib".to_owned() });
                                                                                                        continue;
                                                                                                }
                                                                                                (_, Some(ty)) if is_gossip_msg(ty) => {
                                                                                                        log_gossip!(self.logger, "Got an invalid value while deserializing a gossip message");
                                                                                                        self.enqueue_message(peer, &msgs::WarningMessage {
-                                                                                                               channel_id: [0; 32],
+                                                                                                               channel_id: ChannelId::new_zero(),
                                                                                                                data: format!("Unreadable/bogus gossip message of type {}", ty),
                                                                                                        });
                                                                                                        continue;
@@ -1592,7 +1593,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                        wire::Message::Error(msg) => {
                                log_debug!(self.logger, "Got Err message from {}: {}", log_pubkey!(their_node_id), PrintableString(&msg.data));
                                self.message_handler.chan_handler.handle_error(&their_node_id, &msg);
-                               if msg.channel_id == [0; 32] {
+                               if msg.channel_id.is_zero() {
                                        return Err(PeerHandleError { }.into());
                                }
                        },
@@ -1913,31 +1914,31 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                MessageSendEvent::SendAcceptChannel { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendAcceptChannel event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.temporary_channel_id));
+                                                                       &msg.temporary_channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendAcceptChannelV2 { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendAcceptChannelV2 event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.temporary_channel_id));
+                                                                       &msg.temporary_channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendOpenChannel { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendOpenChannel event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.temporary_channel_id));
+                                                                       &msg.temporary_channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendOpenChannelV2 { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendOpenChannelV2 event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.temporary_channel_id));
+                                                                       &msg.temporary_channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendFundingCreated { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.temporary_channel_id),
+                                                                       &msg.temporary_channel_id,
                                                                        log_funding_channel_id!(msg.funding_txid, msg.funding_output_index));
                                                        // TODO: If the peer is gone we should generate a DiscardFunding event
                                                        // indicating to the wallet that they should just throw away this funding transaction
@@ -1946,73 +1947,73 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                MessageSendEvent::SendFundingSigned { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendFundingSigned event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.channel_id));
+                                                                       &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendChannelReady { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendChannelReady event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.channel_id));
+                                                                       &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxAddInput { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendTxAddInput event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.channel_id));
+                                                                       &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxAddOutput { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendTxAddOutput event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.channel_id));
+                                                                       &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxRemoveInput { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendTxRemoveInput event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.channel_id));
+                                                                       &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxRemoveOutput { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendTxRemoveOutput event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.channel_id));
+                                                                       &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxComplete { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendTxComplete event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.channel_id));
+                                                                       &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxSignatures { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendTxSignatures event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.channel_id));
+                                                                       &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxInitRbf { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendTxInitRbf event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.channel_id));
+                                                                       &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxAckRbf { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendTxAckRbf event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.channel_id));
+                                                                       &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendTxAbort { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendTxAbort event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.channel_id));
+                                                                       &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendAnnouncementSignatures event in peer_handler for node {} for channel {})",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.channel_id));
+                                                                       &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
@@ -2021,7 +2022,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                                        update_add_htlcs.len(),
                                                                        update_fulfill_htlcs.len(),
                                                                        update_fail_htlcs.len(),
-                                                                       log_bytes!(commitment_signed.channel_id));
+                                                                       &commitment_signed.channel_id);
                                                        let mut peer = get_peer_for_forwarding!(node_id);
                                                        for msg in update_add_htlcs {
                                                                self.enqueue_message(&mut *peer, msg);
@@ -2043,25 +2044,25 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendRevokeAndACK event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.channel_id));
+                                                                       &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendClosingSigned event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.channel_id));
+                                                                       &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendShutdown { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling Shutdown event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.channel_id));
+                                                                       &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
                                                        log_debug!(self.logger, "Handling SendChannelReestablish event in peer_handler for node {} for channel {}",
                                                                        log_pubkey!(node_id),
-                                                                       log_bytes!(msg.channel_id));
+                                                                       &msg.channel_id);
                                                        self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg);
                                                },
                                                MessageSendEvent::SendChannelAnnouncement { ref node_id, ref msg, ref update_msg } => {
@@ -2087,7 +2088,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, OM: Deref, L: Deref, CM
                                                        }
                                                },
                                                MessageSendEvent::BroadcastChannelUpdate { msg } => {
-                                                       log_debug!(self.logger, "Handling BroadcastChannelUpdate event in peer_handler for short channel id {}", msg.contents.short_channel_id);
+                                                       log_debug!(self.logger, "Handling BroadcastChannelUpdate event in peer_handler for contents {:?}", msg.contents);
                                                        match self.message_handler.route_handler.handle_channel_update(&msg) {
                                                                Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) =>
                                                                        self.forward_broadcast_msg(peers, &wire::Message::ChannelUpdate(msg), None),
@@ -2482,6 +2483,7 @@ mod tests {
        use crate::sign::{NodeSigner, Recipient};
        use crate::events;
        use crate::io;
+       use crate::ln::ChannelId;
        use crate::ln::features::{InitFeatures, NodeFeatures};
        use crate::ln::peer_channel_encryptor::PeerChannelEncryptor;
        use crate::ln::peer_handler::{CustomMessageHandler, PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler, filter_addresses};
@@ -2721,7 +2723,7 @@ mod tests {
                                                        .push(crate::events::MessageSendEvent::SendShutdown {
                                                                node_id: peers[1].node_signer.get_node_id(Recipient::Node).unwrap(),
                                                                msg: msgs::Shutdown {
-                                                                       channel_id: [0; 32],
+                                                                       channel_id: ChannelId::new_zero(),
                                                                        scriptpubkey: bitcoin::Script::new(),
                                                                },
                                                        });
@@ -2729,7 +2731,7 @@ mod tests {
                                                        .push(crate::events::MessageSendEvent::SendShutdown {
                                                                node_id: peers[0].node_signer.get_node_id(Recipient::Node).unwrap(),
                                                                msg: msgs::Shutdown {
-                                                                       channel_id: [0; 32],
+                                                                       channel_id: ChannelId::new_zero(),
                                                                        scriptpubkey: bitcoin::Script::new(),
                                                                },
                                                        });
@@ -2858,7 +2860,7 @@ mod tests {
 
                let their_id = peers[1].node_signer.get_node_id(Recipient::Node).unwrap();
 
-               let msg = msgs::Shutdown { channel_id: [42; 32], scriptpubkey: bitcoin::Script::new() };
+               let msg = msgs::Shutdown { channel_id: ChannelId::from_bytes([42; 32]), scriptpubkey: bitcoin::Script::new() };
                a_chan_handler.pending_events.lock().unwrap().push(events::MessageSendEvent::SendShutdown {
                        node_id: their_id, msg: msg.clone()
                });
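
The peer_handler hunks above lean on the new `ChannelId` newtype added in lightning/src/ln/channel_id.rs. A minimal stand-in, inferred only from the call sites visible in this diff (`new_zero`, `from_bytes`, `is_zero`, and direct use in `{}` log format arguments), might look roughly like the sketch below; the actual type in the PR likely carries additional derives and helpers.

```rust
use core::fmt;

/// Illustrative stand-in for the new ChannelId newtype; not the actual implementation.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct ChannelId(pub [u8; 32]);

impl ChannelId {
    /// All-zeroes id, used for messages not tied to a specific channel.
    pub fn new_zero() -> Self { Self([0; 32]) }
    /// Wraps raw bytes, e.g. `ChannelId::from_bytes([42; 32])` in tests.
    pub fn from_bytes(data: [u8; 32]) -> Self { Self(data) }
    /// True for the all-zeroes id, replacing the old `channel_id == [0; 32]` checks.
    pub fn is_zero(&self) -> bool { self.0 == [0; 32] }
}

/// Display as lowercase hex so the id can be logged with `{}` instead of `log_bytes!`.
impl fmt::Display for ChannelId {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        for b in self.0.iter() { write!(f, "{:02x}", b)?; }
        Ok(())
    }
}
```
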
index c452630e184529969dd6f45a5f33fd5f9fd4b0f0..eda517e087eb56a12e23ed68be2d6efbf43d1f45 100644 (file)
@@ -18,7 +18,7 @@ use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, Mes
 use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RecipientOnionFields};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
-use crate::util::enforcing_trait_impls::EnforcingSigner;
+use crate::util::test_channel_signer::TestChannelSigner;
 use crate::util::test_utils;
 use crate::util::errors::APIError;
 use crate::util::ser::{Writeable, ReadableArgs};
@@ -399,7 +399,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        let mut node_0_stale_monitors = Vec::new();
        for serialized in node_0_stale_monitors_serialized.iter() {
                let mut read = &serialized[..];
-               let (_, monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut read, (keys_manager, keys_manager)).unwrap();
+               let (_, monitor) = <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(&mut read, (keys_manager, keys_manager)).unwrap();
                assert!(read.is_empty());
                node_0_stale_monitors.push(monitor);
        }
@@ -407,7 +407,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        let mut node_0_monitors = Vec::new();
        for serialized in node_0_monitors_serialized.iter() {
                let mut read = &serialized[..];
-               let (_, monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut read, (keys_manager, keys_manager)).unwrap();
+               let (_, monitor) = <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(&mut read, (keys_manager, keys_manager)).unwrap();
                assert!(read.is_empty());
                node_0_monitors.push(monitor);
        }
index dc02206db24efbece6f434832d0061c113e563bf..64f3585b95332d464b7ec593c64d88152369a36f 100644 (file)
@@ -23,6 +23,7 @@ use bitcoin::network::constants::Network;
 use bitcoin::blockdata::constants::genesis_block;
 
 use crate::events::{MessageSendEvent, MessageSendEventsProvider};
+use crate::ln::ChannelId;
 use crate::ln::features::{ChannelFeatures, NodeFeatures, InitFeatures};
 use crate::ln::msgs::{DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, NetAddress, MAX_VALUE_MSAT};
 use crate::ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, GossipTimestampFilter};
@@ -254,7 +255,7 @@ pub struct P2PGossipSync<G: Deref<Target=NetworkGraph<L>>, U: Deref, L: Deref>
 where U::Target: UtxoLookup, L::Target: Logger
 {
        network_graph: G,
-       utxo_lookup: Option<U>,
+       utxo_lookup: RwLock<Option<U>>,
        #[cfg(feature = "std")]
        full_syncs_requested: AtomicUsize,
        pending_events: Mutex<Vec<MessageSendEvent>>,
@@ -273,7 +274,7 @@ where U::Target: UtxoLookup, L::Target: Logger
                        network_graph,
                        #[cfg(feature = "std")]
                        full_syncs_requested: AtomicUsize::new(0),
-                       utxo_lookup,
+                       utxo_lookup: RwLock::new(utxo_lookup),
                        pending_events: Mutex::new(vec![]),
                        logger,
                }
@@ -282,8 +283,8 @@ where U::Target: UtxoLookup, L::Target: Logger
        /// Adds a provider used to check new announcements. Does not affect
        /// existing announcements unless they are updated.
        /// Adding, updating or removing the provider replaces the current one.
-       pub fn add_utxo_lookup(&mut self, utxo_lookup: Option<U>) {
-               self.utxo_lookup = utxo_lookup;
+       pub fn add_utxo_lookup(&self, utxo_lookup: Option<U>) {
+               *self.utxo_lookup.write().unwrap() = utxo_lookup;
        }
 
        /// Gets a reference to the underlying [`NetworkGraph`] which was provided in
@@ -382,7 +383,7 @@ macro_rules! secp_verify_sig {
                                        err: format!("Invalid signature on {} message", $msg_type),
                                        action: ErrorAction::SendWarningMessage {
                                                msg: msgs::WarningMessage {
-                                                       channel_id: [0; 32],
+                                                       channel_id: ChannelId::new_zero(),
                                                        data: format!("Invalid signature on {} message", $msg_type),
                                                },
                                                log_level: Level::Trace,
@@ -400,7 +401,7 @@ macro_rules! get_pubkey_from_node_id {
                                err: format!("Invalid public key on {} message", $msg_type),
                                action: ErrorAction::SendWarningMessage {
                                        msg: msgs::WarningMessage {
-                                               channel_id: [0; 32],
+                                               channel_id: ChannelId::new_zero(),
                                                data: format!("Invalid public key on {} message", $msg_type),
                                        },
                                        log_level: Level::Trace
@@ -443,7 +444,7 @@ where U::Target: UtxoLookup, L::Target: Logger
        }
 
        fn handle_channel_announcement(&self, msg: &msgs::ChannelAnnouncement) -> Result<bool, LightningError> {
-               self.network_graph.update_channel_from_announcement(msg, &self.utxo_lookup)?;
+               self.network_graph.update_channel_from_announcement(msg, &*self.utxo_lookup.read().unwrap())?;
                Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY)
        }
 
index ded4a5595608c1343ad16bf569cf9f32b6ae0d23..79d54e22d6755896ee1c4ead6be0804413178a47 100644 (file)
@@ -20,22 +20,22 @@ use crate::ln::features::{Bolt11InvoiceFeatures, Bolt12InvoiceFeatures, ChannelF
 use crate::ln::msgs::{DecodeError, ErrorAction, LightningError, MAX_VALUE_MSAT};
 use crate::offers::invoice::{BlindedPayInfo, Bolt12Invoice};
 use crate::routing::gossip::{DirectedChannelInfo, EffectiveCapacity, ReadOnlyNetworkGraph, NetworkGraph, NodeId, RoutingFees};
-use crate::routing::scoring::{ChannelUsage, LockableScore, Score};
+use crate::routing::scoring::{ChannelUsage, LockableScore, ScoreLookUp};
 use crate::util::ser::{Writeable, Readable, ReadableArgs, Writer};
 use crate::util::logger::{Level, Logger};
 use crate::util::chacha20::ChaCha20;
 
 use crate::io;
 use crate::prelude::*;
-use crate::sync::{Mutex};
+use crate::sync::Mutex;
 use alloc::collections::BinaryHeap;
 use core::{cmp, fmt};
-use core::ops::{Deref, DerefMut};
+use core::ops::Deref;
 
 /// A [`Router`] implemented using [`find_route`].
-pub struct DefaultRouter<G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref, SP: Sized, Sc: Score<ScoreParams = SP>> where
+pub struct DefaultRouter<G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>> where
        L::Target: Logger,
-       S::Target: for <'a> LockableScore<'a, Score = Sc>,
+       S::Target: for <'a> LockableScore<'a, ScoreLookUp = Sc>,
 {
        network_graph: G,
        logger: L,
@@ -44,9 +44,9 @@ pub struct DefaultRouter<G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref,
        score_params: SP
 }
 
-impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref, SP: Sized, Sc: Score<ScoreParams = SP>> DefaultRouter<G, L, S, SP, Sc> where
+impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>> DefaultRouter<G, L, S, SP, Sc> where
        L::Target: Logger,
-       S::Target: for <'a> LockableScore<'a, Score = Sc>,
+       S::Target: for <'a> LockableScore<'a, ScoreLookUp = Sc>,
 {
        /// Creates a new router.
        pub fn new(network_graph: G, logger: L, random_seed_bytes: [u8; 32], scorer: S, score_params: SP) -> Self {
@@ -55,9 +55,9 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref, SP: Sized, Sc: Scor
        }
 }
 
-impl< G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref, SP: Sized, Sc: Score<ScoreParams = SP>> Router for DefaultRouter<G, L, S, SP, Sc> where
+impl< G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>> Router for DefaultRouter<G, L, S, SP, Sc> where
        L::Target: Logger,
-       S::Target: for <'a> LockableScore<'a, Score = Sc>,
+       S::Target: for <'a> LockableScore<'a, ScoreLookUp = Sc>,
 {
        fn find_route(
                &self,
@@ -73,7 +73,7 @@ impl< G: Deref<Target = NetworkGraph<L>>, L: Deref, S: Deref, SP: Sized, Sc: Sco
                };
                find_route(
                        payer, params, &self.network_graph, first_hops, &*self.logger,
-                       &ScorerAccountingForInFlightHtlcs::new(self.scorer.lock().deref_mut(), &inflight_htlcs),
+                       &ScorerAccountingForInFlightHtlcs::new(self.scorer.read_lock(), &inflight_htlcs),
                        &self.score_params,
                        &random_seed_bytes
                )
@@ -106,21 +106,20 @@ pub trait Router {
        }
 }
 
-/// [`Score`] implementation that factors in in-flight HTLC liquidity.
+/// [`ScoreLookUp`] implementation that factors in in-flight HTLC liquidity.
 ///
-/// Useful for custom [`Router`] implementations to wrap their [`Score`] on-the-fly when calling
+/// Useful for custom [`Router`] implementations to wrap their [`ScoreLookUp`] on-the-fly when calling
 /// [`find_route`].
 ///
-/// [`Score`]: crate::routing::scoring::Score
-pub struct ScorerAccountingForInFlightHtlcs<'a, S: Score<ScoreParams = SP>, SP: Sized> {
-       scorer: &'a mut S,
+/// [`ScoreLookUp`]: crate::routing::scoring::ScoreLookUp
+pub struct ScorerAccountingForInFlightHtlcs<'a, SP: Sized, Sc: 'a + ScoreLookUp<ScoreParams = SP>, S: Deref<Target = Sc>> {
+       scorer: S,
        // Maps a channel's short channel id and its direction to the liquidity used up.
        inflight_htlcs: &'a InFlightHtlcs,
 }
-
-impl<'a, S: Score<ScoreParams = SP>, SP: Sized> ScorerAccountingForInFlightHtlcs<'a, S, SP> {
+impl<'a, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>, S: Deref<Target = Sc>> ScorerAccountingForInFlightHtlcs<'a, SP, Sc, S> {
        /// Initialize a new `ScorerAccountingForInFlightHtlcs`.
-       pub fn new(scorer: &'a mut S, inflight_htlcs: &'a InFlightHtlcs) -> Self {
+       pub fn new(scorer: S, inflight_htlcs: &'a InFlightHtlcs) -> Self {
                ScorerAccountingForInFlightHtlcs {
                        scorer,
                        inflight_htlcs
@@ -129,12 +128,12 @@ impl<'a, S: Score<ScoreParams = SP>, SP: Sized> ScorerAccountingForInFlightHtlcs
 }
 
 #[cfg(c_bindings)]
-impl<'a, S: Score<ScoreParams = SP>, SP: Sized> Writeable for ScorerAccountingForInFlightHtlcs<'a, S, SP> {
+impl<'a, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>, S: Deref<Target = Sc>> Writeable for ScorerAccountingForInFlightHtlcs<'a, SP, Sc, S> {
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> { self.scorer.write(writer) }
 }
 
-impl<'a, S: Score<ScoreParams = SP>, SP: Sized> Score for ScorerAccountingForInFlightHtlcs<'a, S, SP>  {
-       type ScoreParams = S::ScoreParams;
+impl<'a, SP: Sized, Sc: 'a + ScoreLookUp<ScoreParams = SP>, S: Deref<Target = Sc>> ScoreLookUp for ScorerAccountingForInFlightHtlcs<'a, SP, Sc, S> {
+       type ScoreParams = Sc::ScoreParams;
        fn channel_penalty_msat(&self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams) -> u64 {
                if let Some(used_liquidity) = self.inflight_htlcs.used_liquidity_msat(
                        source, target, short_channel_id
@@ -149,22 +148,6 @@ impl<'a, S: Score<ScoreParams = SP>, SP: Sized> Score for ScorerAccountingForInF
                        self.scorer.channel_penalty_msat(short_channel_id, source, target, usage, score_params)
                }
        }
-
-       fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64) {
-               self.scorer.payment_path_failed(path, short_channel_id)
-       }
-
-       fn payment_path_successful(&mut self, path: &Path) {
-               self.scorer.payment_path_successful(path)
-       }
-
-       fn probe_failed(&mut self, path: &Path, short_channel_id: u64) {
-               self.scorer.probe_failed(path, short_channel_id)
-       }
-
-       fn probe_successful(&mut self, path: &Path) {
-               self.scorer.probe_successful(path)
-       }
 }
 
 /// A data structure for tracking in-flight HTLCs. May be used during pathfinding to account for
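
The `ScorerAccountingForInFlightHtlcs` rework above swaps the stored `&'a mut S` for any `S: Deref<Target = Sc>`, which is what lets `DefaultRouter` hand it a read-lock guard via `read_lock()` instead of a mutable borrow. A small, self-contained illustration of why a `Deref` bound accepts both lock guards and plain references (the names and the `u64` target here are made up for the example):

```rust
use std::ops::Deref;
use std::sync::RwLock;

/// Toy wrapper mirroring the shape of the reworked scorer wrapper: it stores anything
/// that derefs to the inner value, rather than requiring an exclusive `&mut` borrow.
struct Wrapper<S: Deref<Target = u64>> { inner: S }

impl<S: Deref<Target = u64>> Wrapper<S> {
    fn value(&self) -> u64 { *self.inner }
}

fn main() {
    let shared = RwLock::new(42u64);
    // A read guard derefs to the inner value, so it can back the wrapper directly...
    let wrapped_guard = Wrapper { inner: shared.read().unwrap() };
    assert_eq!(wrapped_guard.value(), 42);

    // ...and a plain shared reference works just as well.
    let x = 7u64;
    let wrapped_ref = Wrapper { inner: &x };
    assert_eq!(wrapped_ref.value(), 7);
}
```
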
@@ -1410,7 +1393,7 @@ fn sort_first_hop_channels(
 /// [`ChannelManager::list_usable_channels`]: crate::ln::channelmanager::ChannelManager::list_usable_channels
 /// [`Event::PaymentPathFailed`]: crate::events::Event::PaymentPathFailed
 /// [`NetworkGraph`]: crate::routing::gossip::NetworkGraph
-pub fn find_route<L: Deref, GL: Deref, S: Score>(
+pub fn find_route<L: Deref, GL: Deref, S: ScoreLookUp>(
        our_node_pubkey: &PublicKey, route_params: &RouteParameters,
        network_graph: &NetworkGraph<GL>, first_hops: Option<&[&ChannelDetails]>, logger: L,
        scorer: &S, score_params: &S::ScoreParams, random_seed_bytes: &[u8; 32]
@@ -1424,7 +1407,7 @@ where L::Target: Logger, GL::Target: Logger {
        Ok(route)
 }
 
-pub(crate) fn get_route<L: Deref, S: Score>(
+pub(crate) fn get_route<L: Deref, S: ScoreLookUp>(
        our_node_pubkey: &PublicKey, payment_params: &PaymentParameters, network_graph: &ReadOnlyNetworkGraph,
        first_hops: Option<&[&ChannelDetails]>, final_value_msat: u64, logger: L, scorer: &S, score_params: &S::ScoreParams,
        _random_seed_bytes: &[u8; 32]
@@ -2037,8 +2020,14 @@ where L::Target: Logger {
                                        our_node_pubkey);
                                for details in first_channels {
                                        let first_hop_candidate = CandidateRouteHop::FirstHop { details };
-                                       add_entry!(first_hop_candidate, our_node_id, intro_node_id, 0, path_contribution_msat, 0,
-                                               0_u64, 0, 0);
+                                       let blinded_path_fee = match compute_fees(path_contribution_msat, candidate.fees()) {
+                                               Some(fee) => fee,
+                                               None => continue
+                                       };
+                                       add_entry!(first_hop_candidate, our_node_id, intro_node_id, blinded_path_fee,
+                                               path_contribution_msat, candidate.htlc_minimum_msat(), 0_u64,
+                                               candidate.cltv_expiry_delta(),
+                                               candidate.blinded_path().map_or(1, |bp| bp.blinded_hops.len() as u8));
                                }
                        }
                }
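
The hunk above replaces the hard-coded zero fee for first_hop<>intro_node candidates with `compute_fees(path_contribution_msat, candidate.fees())`, so the amount forwarded into the blinded portion is charged the hint's routing fee. A hedged sketch of that fee term, following the usual BOLT 7 formula; the real `compute_fees` helper may differ in how it handles overflow:

```rust
/// Sketch of the fee now charged on first-hop candidates into a blinded path:
/// base + amount * proportional_millionths / 1_000_000, with overflow treated as "no route".
fn blinded_path_fee_msat(amount_msat: u64, fee_base_msat: u32, fee_prop_millionths: u32) -> Option<u64> {
    let proportional = amount_msat.checked_mul(fee_prop_millionths as u64)? / 1_000_000;
    (fee_base_msat as u64).checked_add(proportional)
}

fn main() {
    // With the values used in the `direct_to_intro_node` test below (1_000 msat base fee,
    // no proportional part), the blinded-path fee comes to 1_000 msat, which is exactly
    // what the unfixed code failed to subtract from the channel's available liquidity.
    assert_eq!(blinded_path_fee_msat(10_000_000, 1_000, 0), Some(1_000));
}
```
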
@@ -2614,7 +2603,7 @@ fn build_route_from_hops_internal<L: Deref>(
                hop_ids: [Option<NodeId>; MAX_PATH_LENGTH_ESTIMATE as usize],
        }
 
-       impl Score for HopScorer {
+       impl ScoreLookUp for HopScorer {
                type ScoreParams = ();
                fn channel_penalty_msat(&self, _short_channel_id: u64, source: &NodeId, target: &NodeId,
                        _usage: ChannelUsage, _score_params: &Self::ScoreParams) -> u64
@@ -2632,14 +2621,6 @@ fn build_route_from_hops_internal<L: Deref>(
                        }
                        u64::max_value()
                }
-
-               fn payment_path_failed(&mut self, _path: &Path, _short_channel_id: u64) {}
-
-               fn payment_path_successful(&mut self, _path: &Path) {}
-
-               fn probe_failed(&mut self, _path: &Path, _short_channel_id: u64) {}
-
-               fn probe_successful(&mut self, _path: &Path) {}
        }
 
        impl<'a> Writeable for HopScorer {
@@ -2673,10 +2654,11 @@ mod tests {
        use crate::routing::router::{get_route, build_route_from_hops_internal, add_random_cltv_offset, default_node_features,
                BlindedTail, InFlightHtlcs, Path, PaymentParameters, Route, RouteHint, RouteHintHop, RouteHop, RoutingFees,
                DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA, MAX_PATH_LENGTH_ESTIMATE};
-       use crate::routing::scoring::{ChannelUsage, FixedPenaltyScorer, Score, ProbabilisticScorer, ProbabilisticScoringFeeParameters, ProbabilisticScoringDecayParameters};
+       use crate::routing::scoring::{ChannelUsage, FixedPenaltyScorer, ScoreLookUp, ProbabilisticScorer, ProbabilisticScoringFeeParameters, ProbabilisticScoringDecayParameters};
        use crate::routing::test_utils::{add_channel, add_or_update_node, build_graph, build_line_graph, id_to_feature_flags, get_nodes, update_channel};
        use crate::chain::transaction::OutPoint;
        use crate::sign::EntropySource;
+       use crate::ln::ChannelId;
        use crate::ln::features::{BlindedHopFeatures, Bolt12InvoiceFeatures, ChannelFeatures, InitFeatures, NodeFeatures};
        use crate::ln::msgs::{ErrorAction, LightningError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
        use crate::ln::channelmanager;
@@ -2709,7 +2691,7 @@ mod tests {
        fn get_channel_details(short_channel_id: Option<u64>, node_id: PublicKey,
                        features: InitFeatures, outbound_capacity_msat: u64) -> channelmanager::ChannelDetails {
                channelmanager::ChannelDetails {
-                       channel_id: [0; 32],
+                       channel_id: ChannelId::new_zero(),
                        counterparty: channelmanager::ChannelCounterparty {
                                features,
                                node_id,
@@ -3897,7 +3879,7 @@ mod tests {
        fn available_amount_while_routing_test() {
                // Tests whether we choose the correct available channel amount while routing.
 
-               let (secp_ctx, network_graph, mut gossip_sync, chain_monitor, logger) = build_graph();
+               let (secp_ctx, network_graph, gossip_sync, chain_monitor, logger) = build_graph();
                let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
                let scorer = ln_test_utils::TestScorer::new();
                let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
@@ -5720,16 +5702,11 @@ mod tests {
        impl Writeable for BadChannelScorer {
                fn write<W: Writer>(&self, _w: &mut W) -> Result<(), crate::io::Error> { unimplemented!() }
        }
-       impl Score for BadChannelScorer {
+       impl ScoreLookUp for BadChannelScorer {
                type ScoreParams = ();
                fn channel_penalty_msat(&self, short_channel_id: u64, _: &NodeId, _: &NodeId, _: ChannelUsage, _score_params:&Self::ScoreParams) -> u64 {
                        if short_channel_id == self.short_channel_id { u64::max_value() } else { 0 }
                }
-
-               fn payment_path_failed(&mut self, _path: &Path, _short_channel_id: u64) {}
-               fn payment_path_successful(&mut self, _path: &Path) {}
-               fn probe_failed(&mut self, _path: &Path, _short_channel_id: u64) {}
-               fn probe_successful(&mut self, _path: &Path) {}
        }
 
        struct BadNodeScorer {
@@ -5741,16 +5718,11 @@ mod tests {
                fn write<W: Writer>(&self, _w: &mut W) -> Result<(), crate::io::Error> { unimplemented!() }
        }
 
-       impl Score for BadNodeScorer {
+       impl ScoreLookUp for BadNodeScorer {
                type ScoreParams = ();
                fn channel_penalty_msat(&self, _: u64, _: &NodeId, target: &NodeId, _: ChannelUsage, _score_params:&Self::ScoreParams) -> u64 {
                        if *target == self.node_id { u64::max_value() } else { 0 }
                }
-
-               fn payment_path_failed(&mut self, _path: &Path, _short_channel_id: u64) {}
-               fn payment_path_successful(&mut self, _path: &Path) {}
-               fn probe_failed(&mut self, _path: &Path, _short_channel_id: u64) {}
-               fn probe_successful(&mut self, _path: &Path) {}
        }
 
        #[test]
@@ -6710,6 +6682,159 @@ mod tests {
                }
                assert_eq!(total_amount_paid_msat, 100_000);
        }
+
+       #[test]
+       fn direct_to_intro_node() {
+               // This previously caused a debug panic in the router when asserting
+               // `used_liquidity_msat <= hop_max_msat`, because when adding first_hop<>blinded_route_hint
+               // direct channels we failed to account for the fee charged for use of the blinded path.
+
+               // Build a graph:
+               // node0 -1(1)2 - node1
+               // such that there isn't enough liquidity to reach node1, but the router thinks there is if it
+               // doesn't account for the blinded path fee.
+
+               let secp_ctx = Secp256k1::new();
+               let logger = Arc::new(ln_test_utils::TestLogger::new());
+               let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, Arc::clone(&logger)));
+               let gossip_sync = P2PGossipSync::new(Arc::clone(&network_graph), None, Arc::clone(&logger));
+               let scorer = ln_test_utils::TestScorer::new();
+               let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
+               let random_seed_bytes = keys_manager.get_secure_random_bytes();
+
+               let amt_msat = 10_000_000;
+               let (_, _, privkeys, nodes) = get_nodes(&secp_ctx);
+               add_channel(&gossip_sync, &secp_ctx, &privkeys[0], &privkeys[1],
+                       ChannelFeatures::from_le_bytes(id_to_feature_flags(1)), 1);
+               update_channel(&gossip_sync, &secp_ctx, &privkeys[0], UnsignedChannelUpdate {
+                       chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+                       short_channel_id: 1,
+                       timestamp: 1,
+                       flags: 0,
+                       cltv_expiry_delta: 42,
+                       htlc_minimum_msat: 1_000,
+                       htlc_maximum_msat: 10_000_000,
+                       fee_base_msat: 800,
+                       fee_proportional_millionths: 0,
+                       excess_data: Vec::new()
+               });
+               update_channel(&gossip_sync, &secp_ctx, &privkeys[1], UnsignedChannelUpdate {
+                       chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+                       short_channel_id: 1,
+                       timestamp: 1,
+                       flags: 1,
+                       cltv_expiry_delta: 42,
+                       htlc_minimum_msat: 1_000,
+                       htlc_maximum_msat: 10_000_000,
+                       fee_base_msat: 800,
+                       fee_proportional_millionths: 0,
+                       excess_data: Vec::new()
+               });
+               let first_hops = vec![
+                       get_channel_details(Some(1), nodes[1], InitFeatures::from_le_bytes(vec![0b11]), 10_000_000)];
+
+               let blinded_path = BlindedPath {
+                       introduction_node_id: nodes[1],
+                       blinding_point: ln_test_utils::pubkey(42),
+                       blinded_hops: vec![
+                               BlindedHop { blinded_node_id: ln_test_utils::pubkey(42 as u8), encrypted_payload: Vec::new() },
+                               BlindedHop { blinded_node_id: ln_test_utils::pubkey(42 as u8), encrypted_payload: Vec::new() }
+                       ],
+               };
+               let blinded_payinfo = BlindedPayInfo {
+                       fee_base_msat: 1000,
+                       fee_proportional_millionths: 0,
+                       htlc_minimum_msat: 1000,
+                       htlc_maximum_msat: MAX_VALUE_MSAT,
+                       cltv_expiry_delta: 0,
+                       features: BlindedHopFeatures::empty(),
+               };
+               let blinded_hints = vec![(blinded_payinfo.clone(), blinded_path)];
+
+               let payment_params = PaymentParameters::blinded(blinded_hints.clone());
+
+               let netgraph = network_graph.read_only();
+               if let Err(LightningError { err, .. }) = get_route(&nodes[0], &payment_params, &netgraph,
+                       Some(&first_hops.iter().collect::<Vec<_>>()), amt_msat, Arc::clone(&logger), &scorer, &(),
+                       &random_seed_bytes) {
+                       assert_eq!(err, "Failed to find a path to the given destination");
+               } else { panic!("Expected error") }
+
+               // Sending an exact amount accounting for the blinded path fee works.
+               let amt_minus_blinded_path_fee = amt_msat - blinded_payinfo.fee_base_msat as u64;
+               let route = get_route(&nodes[0], &payment_params, &netgraph,
+                       Some(&first_hops.iter().collect::<Vec<_>>()), amt_minus_blinded_path_fee,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               assert_eq!(route.get_total_fees(), blinded_payinfo.fee_base_msat as u64);
+               assert_eq!(route.get_total_amount(), amt_minus_blinded_path_fee);
+       }
+
+       #[test]
+       fn direct_to_matching_intro_nodes() {
+               // This previously caused us to enter `unreachable` code in the following situation:
+               // 1. We add a route candidate for intro_node contributing a high amount
+               // 2. We add a first_hop<>intro_node route candidate for the same high amount
+               // 3. We see a cheaper blinded route hint for the same intro node but a much lower contribution
+               //    amount, and update our route candidate for intro_node for the lower amount
+               // 4. We then attempt to update the aforementioned first_hop<>intro_node route candidate for the
+               //    lower contribution amount, but fail (this was previously caused by failure to account for
+               //    blinded path fees when adding first_hop<>intro_node candidates)
+               // 5. We go to construct the path from these route candidates and our first_hop<>intro_node
+               //    candidate still thinks its path is contributing the original higher amount. This caused us
+               //    to hit an `unreachable` overflow when calculating the cheaper intro_node fees over the
+               //    larger amount
+               let secp_ctx = Secp256k1::new();
+               let logger = Arc::new(ln_test_utils::TestLogger::new());
+               let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, Arc::clone(&logger)));
+               let scorer = ln_test_utils::TestScorer::new();
+               let keys_manager = ln_test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
+               let random_seed_bytes = keys_manager.get_secure_random_bytes();
+               let config = UserConfig::default();
+
+               // Values are taken from the fuzz input that uncovered this panic.
+               let amt_msat = 21_7020_5185_1403_2640;
+               let (_, _, _, nodes) = get_nodes(&secp_ctx);
+               let first_hops = vec![
+                       get_channel_details(Some(1), nodes[1], channelmanager::provided_init_features(&config),
+                               18446744073709551615)];
+
+               let blinded_path = BlindedPath {
+                       introduction_node_id: nodes[1],
+                       blinding_point: ln_test_utils::pubkey(42),
+                       blinded_hops: vec![
+                               BlindedHop { blinded_node_id: ln_test_utils::pubkey(42 as u8), encrypted_payload: Vec::new() },
+                               BlindedHop { blinded_node_id: ln_test_utils::pubkey(42 as u8), encrypted_payload: Vec::new() }
+                       ],
+               };
+               let blinded_payinfo = BlindedPayInfo {
+                       fee_base_msat: 5046_2720,
+                       fee_proportional_millionths: 0,
+                       htlc_minimum_msat: 4503_5996_2737_0496,
+                       htlc_maximum_msat: 45_0359_9627_3704_9600,
+                       cltv_expiry_delta: 0,
+                       features: BlindedHopFeatures::empty(),
+               };
+               let mut blinded_hints = vec![
+                       (blinded_payinfo.clone(), blinded_path.clone()),
+                       (blinded_payinfo.clone(), blinded_path.clone()),
+               ];
+               blinded_hints[1].0.fee_base_msat = 419_4304;
+               blinded_hints[1].0.fee_proportional_millionths = 257;
+               blinded_hints[1].0.htlc_minimum_msat = 280_8908_6115_8400;
+               blinded_hints[1].0.htlc_maximum_msat = 2_8089_0861_1584_0000;
+               blinded_hints[1].0.cltv_expiry_delta = 0;
+
+               let bolt12_features: Bolt12InvoiceFeatures = channelmanager::provided_invoice_features(&config).to_context();
+               let payment_params = PaymentParameters::blinded(blinded_hints.clone())
+                       .with_bolt12_features(bolt12_features.clone()).unwrap();
+
+               let netgraph = network_graph.read_only();
+               let route = get_route(&nodes[0], &payment_params, &netgraph,
+                       Some(&first_hops.iter().collect::<Vec<_>>()), amt_msat,
+                       Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
+               assert_eq!(route.get_total_fees(), blinded_payinfo.fee_base_msat as u64);
+               assert_eq!(route.get_total_amount(), amt_msat);
+       }
 }
 
 #[cfg(all(any(test, ldk_bench), not(feature = "no-std")))]
@@ -6721,7 +6846,9 @@ pub(crate) mod bench_utils {
        use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
 
        use crate::chain::transaction::OutPoint;
+       use crate::routing::scoring::ScoreUpdate;
        use crate::sign::{EntropySource, KeysManager};
+       use crate::ln::ChannelId;
        use crate::ln::channelmanager::{self, ChannelCounterparty, ChannelDetails};
        use crate::ln::features::Bolt11InvoiceFeatures;
        use crate::routing::gossip::NetworkGraph;
@@ -6775,7 +6902,7 @@ pub(crate) mod bench_utils {
        #[inline]
        pub(crate) fn first_hop(node_id: PublicKey) -> ChannelDetails {
                ChannelDetails {
-                       channel_id: [0; 32],
+                       channel_id: ChannelId::new_zero(),
                        counterparty: ChannelCounterparty {
                                features: channelmanager::provided_init_features(&UserConfig::default()),
                                node_id,
@@ -6813,7 +6940,7 @@ pub(crate) mod bench_utils {
                }
        }
 
-       pub(crate) fn generate_test_routes<S: Score>(graph: &NetworkGraph<&TestLogger>, scorer: &mut S,
+       pub(crate) fn generate_test_routes<S: ScoreLookUp + ScoreUpdate>(graph: &NetworkGraph<&TestLogger>, scorer: &mut S,
                score_params: &S::ScoreParams, features: Bolt11InvoiceFeatures, mut seed: u64,
                starting_amount: u64, route_count: usize,
        ) -> Vec<(ChannelDetails, PaymentParameters, u64)> {
@@ -6839,7 +6966,7 @@ pub(crate) mod bench_utils {
                                let amt = starting_amount + seed % 1_000_000;
                                let path_exists =
                                        get_route(&payer, &params, &graph.read_only(), Some(&[&first_hop]),
-                                               amt, &TestLogger::new(), &scorer, score_params, &random_seed_bytes).is_ok();
+                                               amt, &TestLogger::new(), scorer, score_params, &random_seed_bytes).is_ok();
                                if path_exists {
                                        // ...and seed the scorer with success and failure data...
                                        seed = seed.overflowing_mul(6364136223846793005).0.overflowing_add(1).0;
@@ -6853,7 +6980,7 @@ pub(crate) mod bench_utils {
                                                        .with_bolt11_features(mpp_features).unwrap();
 
                                                let route_res = get_route(&payer, &params, &graph.read_only(),
-                                                       Some(&[&first_hop]), score_amt, &TestLogger::new(), &scorer,
+                                                       Some(&[&first_hop]), score_amt, &TestLogger::new(), scorer,
                                                        score_params, &random_seed_bytes);
                                                if let Ok(route) = route_res {
                                                        for path in route.paths {
@@ -6882,7 +7009,7 @@ pub(crate) mod bench_utils {
                // requires a too-high CLTV delta.
                route_endpoints.retain(|(first_hop, params, amt)| {
                        get_route(&payer, params, &graph.read_only(), Some(&[first_hop]), *amt,
-                               &TestLogger::new(), &scorer, score_params, &random_seed_bytes).is_ok()
+                               &TestLogger::new(), scorer, score_params, &random_seed_bytes).is_ok()
                });
                route_endpoints.truncate(route_count);
                assert_eq!(route_endpoints.len(), route_count);
@@ -6893,6 +7020,7 @@ pub(crate) mod bench_utils {
 #[cfg(ldk_bench)]
 pub mod benches {
        use super::*;
+       use crate::routing::scoring::{ScoreUpdate, ScoreLookUp};
        use crate::sign::{EntropySource, KeysManager};
        use crate::ln::channelmanager;
        use crate::ln::features::Bolt11InvoiceFeatures;
@@ -6955,7 +7083,7 @@ pub mod benches {
                        "generate_large_mpp_routes_with_probabilistic_scorer");
        }
 
-       fn generate_routes<S: Score>(
+       fn generate_routes<S: ScoreLookUp + ScoreUpdate>(
                bench: &mut Criterion, graph: &NetworkGraph<&TestLogger>, mut scorer: S,
                score_params: &S::ScoreParams, features: Bolt11InvoiceFeatures, starting_amount: u64,
                bench_name: &'static str,
index 75dae72248b13f0100a8b350459580a2957109e5..aaafbc35b6134f4edd839e13c4b3a4c7830759e2 100644 (file)
@@ -10,7 +10,7 @@
 //! Utilities for scoring payment channels.
 //!
 //! [`ProbabilisticScorer`] may be given to [`find_route`] to score payment channels during path
-//! finding when a custom [`Score`] implementation is not needed.
+//! finding when a custom [`ScoreLookUp`] implementation is not needed.
 //!
 //! # Example
 //!
@@ -65,12 +65,12 @@ use crate::util::time::Time;
 
 use crate::prelude::*;
 use core::{cmp, fmt};
-use core::cell::{RefCell, RefMut};
+use core::cell::{RefCell, RefMut, Ref};
 use core::convert::TryInto;
 use core::ops::{Deref, DerefMut};
 use core::time::Duration;
 use crate::io::{self, Read};
-use crate::sync::{Mutex, MutexGuard};
+use crate::sync::{Mutex, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard};
 
 /// We define Score ever-so-slightly differently based on whether we are being built for C bindings
 /// or not. For users, `LockableScore` must somehow be writeable to disk. For Rust users, this is
@@ -86,8 +86,10 @@ use crate::sync::{Mutex, MutexGuard};
 macro_rules! define_score { ($($supertrait: path)*) => {
 /// An interface used to score payment channels for path finding.
 ///
-///    Scoring is in terms of fees willing to be paid in order to avoid routing through a channel.
-pub trait Score $(: $supertrait)* {
+/// `ScoreLookUp` is used to determine the penalty for a given channel.
+///
+/// Scoring is in terms of fees willing to be paid in order to avoid routing through a channel.
+pub trait ScoreLookUp $(: $supertrait)* {
        /// A configurable type which should contain various passed-in parameters for configuring the scorer,
        /// on a per-routefinding-call basis through to the scorer methods,
        /// which are used to determine the parameters for the suitability of channels for use.
@@ -103,7 +105,10 @@ pub trait Score $(: $supertrait)* {
        fn channel_penalty_msat(
                &self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams
        ) -> u64;
+}
 
+/// `ScoreUpdate` is used to update the scorer's internal state after a payment attempt.
+pub trait ScoreUpdate $(: $supertrait)* {
        /// Handles updating channel penalties after failing to route through a channel.
        fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64);
 
@@ -117,14 +122,16 @@ pub trait Score $(: $supertrait)* {
        fn probe_successful(&mut self, path: &Path);
 }
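The split above replaces the old `Score` trait with a read half (`ScoreLookUp`) and a write half (`ScoreUpdate`). As a rough sketch of what a custom scorer looks like under the new shape (not part of this patch; the `FlatScorer` name and its no-op feedback handlers are hypothetical, and a non-`c_bindings` build is assumed so `define_score!` adds no `Writeable` supertrait):

// Hypothetical scorer: a flat penalty for every channel, all feedback ignored.
struct FlatScorer { penalty_msat: u64 }

impl ScoreLookUp for FlatScorer {
    type ScoreParams = ();
    fn channel_penalty_msat(
        &self, _short_channel_id: u64, _source: &NodeId, _target: &NodeId,
        _usage: ChannelUsage, _score_params: &Self::ScoreParams
    ) -> u64 {
        self.penalty_msat
    }
}

impl ScoreUpdate for FlatScorer {
    fn payment_path_failed(&mut self, _path: &Path, _short_channel_id: u64) {}
    fn payment_path_successful(&mut self, _path: &Path) {}
    fn probe_failed(&mut self, _path: &Path, _short_channel_id: u64) {}
    fn probe_successful(&mut self, _path: &Path) {}
}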
 
-impl<S: Score, T: DerefMut<Target=S> $(+ $supertrait)*> Score for T {
-       type ScoreParams = S::ScoreParams;
+impl<SP: Sized, S: ScoreLookUp<ScoreParams = SP>, T: Deref<Target=S> $(+ $supertrait)*> ScoreLookUp for T {
+       type ScoreParams = SP;
        fn channel_penalty_msat(
                &self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams
        ) -> u64 {
                self.deref().channel_penalty_msat(short_channel_id, source, target, usage, score_params)
        }
+}
 
+impl<S: ScoreUpdate, T: DerefMut<Target=S> $(+ $supertrait)*> ScoreUpdate for T {
        fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64) {
                self.deref_mut().payment_path_failed(path, short_channel_id)
        }
@@ -145,26 +152,35 @@ impl<S: Score, T: DerefMut<Target=S> $(+ $supertrait)*> Score for T {
 
 #[cfg(c_bindings)]
 define_score!(Writeable);
+
 #[cfg(not(c_bindings))]
 define_score!();
 
 /// A scorer that is accessed under a lock.
 ///
-/// Needed so that calls to [`Score::channel_penalty_msat`] in [`find_route`] can be made while
-/// having shared ownership of a scorer but without requiring internal locking in [`Score`]
+/// Needed so that calls to [`ScoreLookUp::channel_penalty_msat`] in [`find_route`] can be made while
+/// having shared ownership of a scorer but without requiring internal locking in [`ScoreUpdate`]
 /// implementations. Internal locking would be detrimental to route finding performance and could
-/// result in [`Score::channel_penalty_msat`] returning a different value for the same channel.
+/// result in [`ScoreLookUp::channel_penalty_msat`] returning a different value for the same channel.
 ///
 /// [`find_route`]: crate::routing::router::find_route
 pub trait LockableScore<'a> {
-       /// The [`Score`] type.
-       type Score: 'a + Score;
+       /// The [`ScoreUpdate`] type.
+       type ScoreUpdate: 'a + ScoreUpdate;
+       /// The [`ScoreLookUp`] type.
+       type ScoreLookUp: 'a + ScoreLookUp;
+
+       /// The write locked [`ScoreUpdate`] type.
+       type WriteLocked: DerefMut<Target = Self::ScoreUpdate> + Sized;
 
-       /// The locked [`Score`] type.
-       type Locked: DerefMut<Target = Self::Score> + Sized;
+       /// The read locked [`ScoreLookUp`] type.
+       type ReadLocked: Deref<Target = Self::ScoreLookUp> + Sized;
 
-       /// Returns the locked scorer.
-       fn lock(&'a self) -> Self::Locked;
+       /// Returns the read-locked scorer.
+       fn read_lock(&'a self) -> Self::ReadLocked;
+
+       /// Returns the write-locked scorer.
+       fn write_lock(&'a self) -> Self::WriteLocked;
 }
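With the split locking API, callers take the lock matching the operation: a shared read lock for penalty look-ups during routing and an exclusive write lock for recording payment results. A minimal usage sketch (illustrative only; `FlatScorer` is the hypothetical scorer from the sketch above, and `failed_path` is assumed to exist in the caller's context):

fn note_failure<'a, S: LockableScore<'a>>(scorer: &'a S, path: &Path, scid: u64) {
    // Penalty queries would go through `scorer.read_lock()`; updates need the
    // exclusive write lock.
    let mut locked = scorer.write_lock();
    locked.payment_path_failed(path, scid);
}

// Any of the provided wrappers works, e.g. a `RefCell` in single-threaded code:
// let scorer = core::cell::RefCell::new(FlatScorer { penalty_msat: 500 });
// note_failure(&scorer, &failed_path, 42);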
 
 /// Refers to a scorer that is accessible under lock and also writeable to disk
@@ -176,89 +192,138 @@ pub trait WriteableScore<'a>: LockableScore<'a> + Writeable {}
 #[cfg(not(c_bindings))]
 impl<'a, T> WriteableScore<'a> for T where T: LockableScore<'a> + Writeable {}
 #[cfg(not(c_bindings))]
-impl<'a, T: 'a + Score> LockableScore<'a> for Mutex<T> {
-       type Score = T;
-       type Locked = MutexGuard<'a, T>;
+impl<'a, T: 'a + ScoreLookUp + ScoreUpdate> LockableScore<'a> for Mutex<T> {
+       type ScoreUpdate = T;
+       type ScoreLookUp = T;
+
+       type WriteLocked = MutexGuard<'a, Self::ScoreUpdate>;
+       type ReadLocked = MutexGuard<'a, Self::ScoreLookUp>;
+
+       fn read_lock(&'a self) -> Self::ReadLocked {
+               Mutex::lock(self).unwrap()
+       }
 
-       fn lock(&'a self) -> Self::Locked {
+       fn write_lock(&'a self) -> Self::WriteLocked {
                Mutex::lock(self).unwrap()
        }
 }
 
 #[cfg(not(c_bindings))]
-impl<'a, T: 'a + Score> LockableScore<'a> for RefCell<T> {
-       type Score = T;
-       type Locked = RefMut<'a, T>;
+impl<'a, T: 'a + ScoreUpdate + ScoreLookUp> LockableScore<'a> for RefCell<T> {
+       type ScoreUpdate = T;
+       type ScoreLookUp = T;
+
+       type WriteLocked = RefMut<'a, Self::ScoreUpdate>;
+       type ReadLocked = Ref<'a, Self::ScoreLookUp>;
 
-       fn lock(&'a self) -> Self::Locked {
+       fn write_lock(&'a self) -> Self::WriteLocked {
                self.borrow_mut()
        }
+
+       fn read_lock(&'a self) -> Self::ReadLocked {
+               self.borrow()
+       }
+}
+
+#[cfg(not(c_bindings))]
+impl<'a, SP: Sized, T: 'a + ScoreUpdate + ScoreLookUp<ScoreParams = SP>> LockableScore<'a> for RwLock<T> {
+       type ScoreUpdate = T;
+       type ScoreLookUp = T;
+
+       type WriteLocked = RwLockWriteGuard<'a, Self::ScoreUpdate>;
+       type ReadLocked = RwLockReadGuard<'a, Self::ScoreLookUp>;
+
+       fn read_lock(&'a self) -> Self::ReadLocked {
+               RwLock::read(self).unwrap()
+       }
+
+       fn write_lock(&'a self) -> Self::WriteLocked {
+               RwLock::write(self).unwrap()
+       }
 }
 
 #[cfg(c_bindings)]
 /// A concrete implementation of [`LockableScore`] which supports multi-threading.
-pub struct MultiThreadedLockableScore<T: Score> {
-       score: Mutex<T>,
+pub struct MultiThreadedLockableScore<T: ScoreLookUp + ScoreUpdate> {
+       score: RwLock<T>,
 }
 
 #[cfg(c_bindings)]
-impl<'a, T: 'a + Score> LockableScore<'a> for MultiThreadedLockableScore<T> {
-       type Score = T;
-       type Locked = MultiThreadedScoreLock<'a, T>;
+impl<'a, SP: Sized, T: 'a + ScoreLookUp<ScoreParams = SP> + ScoreUpdate> LockableScore<'a> for MultiThreadedLockableScore<T> {
+       type ScoreUpdate = T;
+       type ScoreLookUp = T;
+       type WriteLocked = MultiThreadedScoreLockWrite<'a, Self::ScoreUpdate>;
+       type ReadLocked = MultiThreadedScoreLockRead<'a, Self::ScoreLookUp>;
+
+       fn read_lock(&'a self) -> Self::ReadLocked {
+               MultiThreadedScoreLockRead(self.score.read().unwrap())
+       }
 
-       fn lock(&'a self) -> Self::Locked {
-               MultiThreadedScoreLock(Mutex::lock(&self.score).unwrap())
+       fn write_lock(&'a self) -> Self::WriteLocked {
+               MultiThreadedScoreLockWrite(self.score.write().unwrap())
        }
 }
 
 #[cfg(c_bindings)]
-impl<T: Score> Writeable for MultiThreadedLockableScore<T> {
+impl<T: ScoreUpdate + ScoreLookUp> Writeable for MultiThreadedLockableScore<T> {
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
-               self.lock().write(writer)
+               self.score.read().unwrap().write(writer)
        }
 }
 
 #[cfg(c_bindings)]
-impl<'a, T: 'a + Score> WriteableScore<'a> for MultiThreadedLockableScore<T> {}
+impl<'a, T: 'a + ScoreUpdate + ScoreLookUp> WriteableScore<'a> for MultiThreadedLockableScore<T> {}
 
 #[cfg(c_bindings)]
-impl<T: Score> MultiThreadedLockableScore<T> {
+impl<T: ScoreLookUp + ScoreUpdate> MultiThreadedLockableScore<T> {
        /// Creates a new [`MultiThreadedLockableScore`] given an underlying [`Score`].
        pub fn new(score: T) -> Self {
-               MultiThreadedLockableScore { score: Mutex::new(score) }
+               MultiThreadedLockableScore { score: RwLock::new(score) }
        }
 }
 
 #[cfg(c_bindings)]
 /// A locked `MultiThreadedLockableScore`.
-pub struct MultiThreadedScoreLock<'a, T: Score>(MutexGuard<'a, T>);
+pub struct MultiThreadedScoreLockRead<'a, T: ScoreLookUp>(RwLockReadGuard<'a, T>);
 
 #[cfg(c_bindings)]
-impl<'a, T: 'a + Score> Writeable for MultiThreadedScoreLock<'a, T> {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
-               self.0.write(writer)
+/// A write-locked `MultiThreadedLockableScore`.
+pub struct MultiThreadedScoreLockWrite<'a, T: ScoreUpdate>(RwLockWriteGuard<'a, T>);
+
+#[cfg(c_bindings)]
+impl<'a, T: 'a + ScoreLookUp> Deref for MultiThreadedScoreLockRead<'a, T> {
+       type Target = T;
+
+       fn deref(&self) -> &Self::Target {
+               self.0.deref()
        }
 }
 
 #[cfg(c_bindings)]
-impl<'a, T: 'a + Score> DerefMut for MultiThreadedScoreLock<'a, T> {
-    fn deref_mut(&mut self) -> &mut Self::Target {
-        self.0.deref_mut()
-    }
+impl<'a, T: 'a + ScoreUpdate> Writeable for MultiThreadedScoreLockWrite<'a, T> {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+               self.0.write(writer)
+       }
 }
 
 #[cfg(c_bindings)]
-impl<'a, T: 'a + Score> Deref for MultiThreadedScoreLock<'a, T> {
+impl<'a, T: 'a + ScoreUpdate> Deref for MultiThreadedScoreLockWrite<'a, T> {
        type Target = T;
 
-    fn deref(&self) -> &Self::Target {
-        self.0.deref()
-    }
+       fn deref(&self) -> &Self::Target {
+               self.0.deref()
+       }
 }
 
+#[cfg(c_bindings)]
+impl<'a, T: 'a + ScoreUpdate> DerefMut for MultiThreadedScoreLockWrite<'a, T> {
+       fn deref_mut(&mut self) -> &mut Self::Target {
+               self.0.deref_mut()
+       }
+}
 
 
-/// Proposed use of a channel passed as a parameter to [`Score::channel_penalty_msat`].
+/// Proposed use of a channel passed as a parameter to [`ScoreLookUp::channel_penalty_msat`].
 #[derive(Clone, Copy, Debug, PartialEq)]
 pub struct ChannelUsage {
        /// The amount to send through the channel, denominated in millisatoshis.
@@ -273,7 +338,7 @@ pub struct ChannelUsage {
 }
 
 #[derive(Clone)]
-/// [`Score`] implementation that uses a fixed penalty.
+/// [`ScoreLookUp`] implementation that uses a fixed penalty.
 pub struct FixedPenaltyScorer {
        penalty_msat: u64,
 }
@@ -285,12 +350,14 @@ impl FixedPenaltyScorer {
        }
 }
 
-impl Score for FixedPenaltyScorer {
+impl ScoreLookUp for FixedPenaltyScorer {
        type ScoreParams = ();
        fn channel_penalty_msat(&self, _: u64, _: &NodeId, _: &NodeId, _: ChannelUsage, _score_params: &Self::ScoreParams) -> u64 {
                self.penalty_msat
        }
+}
 
+impl ScoreUpdate for FixedPenaltyScorer {
        fn payment_path_failed(&mut self, _path: &Path, _short_channel_id: u64) {}
 
        fn payment_path_successful(&mut self, _path: &Path) {}
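A quick sanity sketch of the split impls above (illustrative; `source`, `target`, and `usage` are assumed to be in scope): the look-up half always returns the configured penalty, while the update half is a no-op.

let scorer = FixedPenaltyScorer::with_penalty(1_000);
// `ScoreParams` is `()`, so the params argument is just a unit reference.
assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &()), 1_000);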
@@ -323,7 +390,7 @@ use crate::util::time::Eternity;
 #[cfg(feature = "no-std")]
 type ConfiguredTime = Eternity;
 
-/// [`Score`] implementation using channel success probability distributions.
+/// [`ScoreLookUp`] implementation using channel success probability distributions.
 ///
 /// Channels are tracked with upper and lower liquidity bounds - when an HTLC fails at a channel,
 /// we learn that the upper-bound on the available liquidity is lower than the amount of the HTLC.
@@ -361,7 +428,7 @@ type ConfiguredTime = Eternity;
 /// [`historical_liquidity_penalty_amount_multiplier_msat`]: ProbabilisticScoringFeeParameters::historical_liquidity_penalty_amount_multiplier_msat
 pub type ProbabilisticScorer<G, L> = ProbabilisticScorerUsingTime::<G, L, ConfiguredTime>;
 
-/// Probabilistic [`Score`] implementation.
+/// Probabilistic [`ScoreLookUp`] implementation.
 ///
 /// This is not exported to bindings users; generally all users should use the [`ProbabilisticScorer`] type alias.
 pub struct ProbabilisticScorerUsingTime<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time>
@@ -641,147 +708,6 @@ impl ProbabilisticScoringDecayParameters {
        }
 }
 
-/// Tracks the historical state of a distribution as a weighted average of how much time was spent
-/// in each of 8 buckets.
-#[derive(Clone, Copy)]
-struct HistoricalBucketRangeTracker {
-       buckets: [u16; 8],
-}
-
-impl HistoricalBucketRangeTracker {
-       fn new() -> Self { Self { buckets: [0; 8] } }
-       fn track_datapoint(&mut self, liquidity_offset_msat: u64, capacity_msat: u64) {
-               // We have 8 leaky buckets for min and max liquidity. Each bucket tracks the amount of time
-               // we spend in each bucket as a 16-bit fixed-point number with a 5 bit fractional part.
-               //
-               // Each time we update our liquidity estimate, we add 32 (1.0 in our fixed-point system) to
-               // the buckets for the current min and max liquidity offset positions.
-               //
-               // We then decay each bucket by multiplying by 2047/2048 (avoiding dividing by a
-               // non-power-of-two). This ensures we can't actually overflow the u16 - when we get to
-               // 63,457 adding 32 and decaying by 2047/2048 leaves us back at 63,457.
-               //
-               // In total, this allows us to track data for the last 8,000 or so payments across a given
-               // channel.
-               //
-               // These constants are a balance - we try to fit in 2 bytes per bucket to reduce overhead,
-               // and need to balance having more bits in the decimal part (to ensure decay isn't too
-               // non-linear) with having too few bits in the mantissa, causing us to not store very many
-               // datapoints.
-               //
-               // The constants were picked experimentally, selecting a decay amount that restricts us
-               // from overflowing buckets without having to cap them manually.
-
-               // Ensure the bucket index is in the range [0, 7], even if the liquidity offset is zero or
-               // the channel's capacity, though the second should generally never happen.
-               debug_assert!(liquidity_offset_msat <= capacity_msat);
-               let bucket_idx: u8 = (liquidity_offset_msat * 8 / capacity_msat.saturating_add(1))
-                       .try_into().unwrap_or(32); // 32 is bogus for 8 buckets, and will be ignored
-               debug_assert!(bucket_idx < 8);
-               if bucket_idx < 8 {
-                       for e in self.buckets.iter_mut() {
-                               *e = ((*e as u32) * 2047 / 2048) as u16;
-                       }
-                       self.buckets[bucket_idx as usize] = self.buckets[bucket_idx as usize].saturating_add(32);
-               }
-       }
-       /// Decay all buckets by the given number of half-lives. Used to more aggressively remove old
-       /// datapoints as we receive newer information.
-       fn time_decay_data(&mut self, half_lives: u32) {
-               for e in self.buckets.iter_mut() {
-                       *e = e.checked_shr(half_lives).unwrap_or(0);
-               }
-       }
-}
-
-impl_writeable_tlv_based!(HistoricalBucketRangeTracker, { (0, buckets, required) });
-
-struct HistoricalMinMaxBuckets<'a> {
-       min_liquidity_offset_history: &'a HistoricalBucketRangeTracker,
-       max_liquidity_offset_history: &'a HistoricalBucketRangeTracker,
-}
-
-impl HistoricalMinMaxBuckets<'_> {
-       #[inline]
-       fn get_decayed_buckets<T: Time>(&self, now: T, last_updated: T, half_life: Duration)
-       -> ([u16; 8], [u16; 8], u32) {
-               let required_decays = now.duration_since(last_updated).as_secs()
-                       .checked_div(half_life.as_secs())
-                       .map_or(u32::max_value(), |decays| cmp::min(decays, u32::max_value() as u64) as u32);
-               let mut min_buckets = *self.min_liquidity_offset_history;
-               min_buckets.time_decay_data(required_decays);
-               let mut max_buckets = *self.max_liquidity_offset_history;
-               max_buckets.time_decay_data(required_decays);
-               (min_buckets.buckets, max_buckets.buckets, required_decays)
-       }
-
-       #[inline]
-       fn calculate_success_probability_times_billion<T: Time>(
-               &self, now: T, last_updated: T, half_life: Duration, payment_amt_64th_bucket: u8)
-       -> Option<u64> {
-               // If historical penalties are enabled, calculate the penalty by walking the set of
-               // historical liquidity bucket (min, max) combinations (where min_idx < max_idx) and, for
-               // each, calculate the probability of success given our payment amount, then total the
-               // weighted average probability of success.
-               //
-               // We use a sliding scale to decide which point within a given bucket will be compared to
-               // the amount being sent - for lower-bounds, the amount being sent is compared to the lower
-               // edge of the first bucket (i.e. zero), but compared to the upper 7/8ths of the last
-               // bucket (i.e. 9 times the index, or 63), with each bucket in between increasing the
-               // comparison point by 1/64th. For upper-bounds, the same applies, however with an offset
-               // of 1/64th (i.e. starting at one and ending at 64). This avoids failing to assign
-               // penalties to channels at the edges.
-               //
-               // If we used the bottom edge of buckets, we'd end up never assigning any penalty at all to
-               // such a channel when sending less than ~0.19% of the channel's capacity (e.g. ~200k sats
-               // for a 1 BTC channel!).
-               //
-               // If we used the middle of each bucket we'd never assign any penalty at all when sending
-               // less than 1/16th of a channel's capacity, or 1/8th if we used the top of the bucket.
-               let mut total_valid_points_tracked = 0;
-
-               // Check if all our buckets are zero, once decayed and treat it as if we had no data. We
-               // don't actually use the decayed buckets, though, as that would lose precision.
-               let (decayed_min_buckets, decayed_max_buckets, required_decays) =
-                       self.get_decayed_buckets(now, last_updated, half_life);
-               if decayed_min_buckets.iter().all(|v| *v == 0) || decayed_max_buckets.iter().all(|v| *v == 0) {
-                       return None;
-               }
-
-               for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
-                       for max_bucket in self.max_liquidity_offset_history.buckets.iter().take(8 - min_idx) {
-                               total_valid_points_tracked += (*min_bucket as u64) * (*max_bucket as u64);
-                       }
-               }
-               // If the total valid points is smaller than 1.0 (i.e. 32 in our fixed-point scheme), treat
-               // it as if we were fully decayed.
-               if total_valid_points_tracked.checked_shr(required_decays).unwrap_or(0) < 32*32 {
-                       return None;
-               }
-
-               let mut cumulative_success_prob_times_billion = 0;
-               for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
-                       for (max_idx, max_bucket) in self.max_liquidity_offset_history.buckets.iter().enumerate().take(8 - min_idx) {
-                               let bucket_prob_times_million = (*min_bucket as u64) * (*max_bucket as u64)
-                                       * 1024 * 1024 / total_valid_points_tracked;
-                               let min_64th_bucket = min_idx as u8 * 9;
-                               let max_64th_bucket = (7 - max_idx as u8) * 9 + 1;
-                               if payment_amt_64th_bucket > max_64th_bucket {
-                                       // Success probability 0, the payment amount is above the max liquidity
-                               } else if payment_amt_64th_bucket <= min_64th_bucket {
-                                       cumulative_success_prob_times_billion += bucket_prob_times_million * 1024;
-                               } else {
-                                       cumulative_success_prob_times_billion += bucket_prob_times_million *
-                                               ((max_64th_bucket - payment_amt_64th_bucket) as u64) * 1024 /
-                                               ((max_64th_bucket - min_64th_bucket) as u64);
-                               }
-                       }
-               }
-
-               Some(cumulative_success_prob_times_billion)
-       }
-}
-
 /// Accounting for channel liquidity balance uncertainty.
 ///
 /// Direction is defined in terms of [`NodeId`] partial ordering, where the source node is the
@@ -806,8 +732,7 @@ struct ChannelLiquidity<T: Time> {
 struct DirectedChannelLiquidity<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>, T: Time, U: Deref<Target = T>> {
        min_liquidity_offset_msat: L,
        max_liquidity_offset_msat: L,
-       min_liquidity_offset_history: BRT,
-       max_liquidity_offset_history: BRT,
+       liquidity_history: HistoricalMinMaxBuckets<BRT>,
        inflight_htlc_msat: u64,
        capacity_msat: u64,
        last_updated: U,
@@ -848,12 +773,9 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ProbabilisticScorerU
                                                let amt = directed_info.effective_capacity().as_msat();
                                                let dir_liq = liq.as_directed(source, target, 0, amt, self.decay_params);
 
-                                               let buckets = HistoricalMinMaxBuckets {
-                                                       min_liquidity_offset_history: &dir_liq.min_liquidity_offset_history,
-                                                       max_liquidity_offset_history: &dir_liq.max_liquidity_offset_history,
-                                               };
-                                               let (min_buckets, max_buckets, _) = buckets.get_decayed_buckets(now,
-                                                       *dir_liq.last_updated, self.decay_params.historical_no_updates_half_life);
+                                               let (min_buckets, max_buckets, _) = dir_liq.liquidity_history
+                                                       .get_decayed_buckets(now, *dir_liq.last_updated,
+                                                               self.decay_params.historical_no_updates_half_life);
 
                                                log_debug!(self.logger, core::concat!(
                                                        "Liquidity from {} to {} via {} is in the range ({}, {}).\n",
@@ -917,6 +839,9 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ProbabilisticScorerU
        ///
        /// Because the datapoints are decayed slowly over time, values will eventually return to
        /// `Some(([0; 8], [0; 8]))`.
+       ///
+       /// In order to fetch a single success probability from the buckets provided here, as used in
+       /// the scoring model, see [`Self::historical_estimated_payment_success_probability`].
        pub fn historical_estimated_channel_liquidity_probabilities(&self, scid: u64, target: &NodeId)
        -> Option<([u16; 8], [u16; 8])> {
                let graph = self.network_graph.read_only();
@@ -927,12 +852,9 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ProbabilisticScorerU
                                        let amt = directed_info.effective_capacity().as_msat();
                                        let dir_liq = liq.as_directed(source, target, 0, amt, self.decay_params);
 
-                                       let buckets = HistoricalMinMaxBuckets {
-                                               min_liquidity_offset_history: &dir_liq.min_liquidity_offset_history,
-                                               max_liquidity_offset_history: &dir_liq.max_liquidity_offset_history,
-                                       };
-                                       let (min_buckets, mut max_buckets, _) = buckets.get_decayed_buckets(T::now(),
-                                               *dir_liq.last_updated, self.decay_params.historical_no_updates_half_life);
+                                       let (min_buckets, mut max_buckets, _) = dir_liq.liquidity_history
+                                               .get_decayed_buckets(dir_liq.now, *dir_liq.last_updated,
+                                                       self.decay_params.historical_no_updates_half_life);
                                        // Note that the liquidity buckets are an offset from the edge, so we inverse
                                        // the max order to get the probabilities from zero.
                                        max_buckets.reverse();
@@ -942,6 +864,34 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ProbabilisticScorerU
                }
                None
        }
+
+       /// Query the probability of payment success sending the given `amount_msat` over the channel
+       /// with `scid` towards the given `target` node, based on the historical estimated liquidity
+       /// bounds.
+       ///
+       /// These are the same bounds as returned by
+       /// [`Self::historical_estimated_channel_liquidity_probabilities`] (but not those returned by
+       /// [`Self::estimated_channel_liquidity_range`]).
+       pub fn historical_estimated_payment_success_probability(
+               &self, scid: u64, target: &NodeId, amount_msat: u64)
+       -> Option<f64> {
+               let graph = self.network_graph.read_only();
+
+               if let Some(chan) = graph.channels().get(&scid) {
+                       if let Some(liq) = self.channel_liquidities.get(&scid) {
+                               if let Some((directed_info, source)) = chan.as_directed_to(target) {
+                                       let capacity_msat = directed_info.effective_capacity().as_msat();
+                                       let dir_liq = liq.as_directed(source, target, 0, capacity_msat, self.decay_params);
+
+                                       return dir_liq.liquidity_history.calculate_success_probability_times_billion(
+                                               dir_liq.now, *dir_liq.last_updated,
+                                               self.decay_params.historical_no_updates_half_life, amount_msat, capacity_msat
+                                       ).map(|p| p as f64 / (1024 * 1024 * 1024) as f64);
+                               }
+                       }
+               }
+               None
+       }
 }
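A short usage sketch for the new accessor (illustrative; `scorer`, `scid`, and `target` are assumed to exist in the caller's context):

// Returns a probability in [0.0, 1.0] derived from the historical liquidity
// buckets, or None when there is no (undecayed) data for the channel.
if let Some(p) = scorer.historical_estimated_payment_success_probability(scid, &target, 250_000) {
    println!("estimated historical success probability for 250k msat: {:.3}", p);
}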
 
 impl<T: Time> ChannelLiquidity<T> {
@@ -973,8 +923,10 @@ impl<T: Time> ChannelLiquidity<T> {
                DirectedChannelLiquidity {
                        min_liquidity_offset_msat,
                        max_liquidity_offset_msat,
-                       min_liquidity_offset_history,
-                       max_liquidity_offset_history,
+                       liquidity_history: HistoricalMinMaxBuckets {
+                               min_liquidity_offset_history,
+                               max_liquidity_offset_history,
+                       },
                        inflight_htlc_msat,
                        capacity_msat,
                        last_updated: &self.last_updated,
@@ -1000,8 +952,10 @@ impl<T: Time> ChannelLiquidity<T> {
                DirectedChannelLiquidity {
                        min_liquidity_offset_msat,
                        max_liquidity_offset_msat,
-                       min_liquidity_offset_history,
-                       max_liquidity_offset_history,
+                       liquidity_history: HistoricalMinMaxBuckets {
+                               min_liquidity_offset_history,
+                               max_liquidity_offset_history,
+                       },
                        inflight_htlc_msat,
                        capacity_msat,
                        last_updated: &mut self.last_updated,
@@ -1027,6 +981,7 @@ impl<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>,
        /// Returns a liquidity penalty for routing the given HTLC `amount_msat` through the channel in
        /// this direction.
        fn penalty_msat(&self, amount_msat: u64, score_params: &ProbabilisticScoringFeeParameters) -> u64 {
+               let available_capacity = self.available_capacity();
                let max_liquidity_msat = self.max_liquidity_msat();
                let min_liquidity_msat = core::cmp::min(self.min_liquidity_msat(), max_liquidity_msat);
 
@@ -1058,28 +1013,20 @@ impl<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>,
                        }
                };
 
+               if amount_msat >= available_capacity {
+                       // We're trying to send at least the channel's available capacity, so use a maximal penalty.
+                       res = res.saturating_add(Self::combined_penalty_msat(amount_msat,
+                               NEGATIVE_LOG10_UPPER_BOUND * 2048,
+                               score_params.historical_liquidity_penalty_multiplier_msat,
+                               score_params.historical_liquidity_penalty_amount_multiplier_msat));
+                       return res;
+               }
+
                if score_params.historical_liquidity_penalty_multiplier_msat != 0 ||
                   score_params.historical_liquidity_penalty_amount_multiplier_msat != 0 {
-                       let payment_amt_64th_bucket = if amount_msat < u64::max_value() / 64 {
-                               amount_msat * 64 / self.capacity_msat.saturating_add(1)
-                       } else {
-                               // Only use 128-bit arithmetic when multiplication will overflow to avoid 128-bit
-                               // division. This branch should only be hit in fuzz testing since the amount would
-                               // need to be over 2.88 million BTC in practice.
-                               ((amount_msat as u128) * 64 / (self.capacity_msat as u128).saturating_add(1))
-                                       .try_into().unwrap_or(65)
-                       };
-                       #[cfg(not(fuzzing))]
-                       debug_assert!(payment_amt_64th_bucket <= 64);
-                       if payment_amt_64th_bucket > 64 { return res; }
-
-                       let buckets = HistoricalMinMaxBuckets {
-                               min_liquidity_offset_history: &self.min_liquidity_offset_history,
-                               max_liquidity_offset_history: &self.max_liquidity_offset_history,
-                       };
-                       if let Some(cumulative_success_prob_times_billion) = buckets
+                       if let Some(cumulative_success_prob_times_billion) = self.liquidity_history
                                .calculate_success_probability_times_billion(self.now, *self.last_updated,
-                                       self.decay_params.historical_no_updates_half_life, payment_amt_64th_bucket as u8)
+                                       self.decay_params.historical_no_updates_half_life, amount_msat, self.capacity_msat)
                        {
                                let historical_negative_log10_times_2048 = approx::negative_log10_times_2048(cumulative_success_prob_times_billion + 1, 1024 * 1024 * 1024);
                                res = res.saturating_add(Self::combined_penalty_msat(amount_msat,
@@ -1105,15 +1052,15 @@ impl<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>,
 
        /// Computes the liquidity penalty from the penalty multipliers.
        #[inline(always)]
-       fn combined_penalty_msat(amount_msat: u64, negative_log10_times_2048: u64,
+       fn combined_penalty_msat(amount_msat: u64, mut negative_log10_times_2048: u64,
                liquidity_penalty_multiplier_msat: u64, liquidity_penalty_amount_multiplier_msat: u64,
        ) -> u64 {
-               let liquidity_penalty_msat = {
-                       // Upper bound the liquidity penalty to ensure some channel is selected.
-                       let multiplier_msat = liquidity_penalty_multiplier_msat;
-                       let max_penalty_msat = multiplier_msat.saturating_mul(NEGATIVE_LOG10_UPPER_BOUND);
-                       (negative_log10_times_2048.saturating_mul(multiplier_msat) / 2048).min(max_penalty_msat)
-               };
+               negative_log10_times_2048 =
+                       negative_log10_times_2048.min(NEGATIVE_LOG10_UPPER_BOUND * 2048);
+
+               // Upper bound the liquidity penalty to ensure some channel is selected.
+               let liquidity_penalty_msat = negative_log10_times_2048
+                       .saturating_mul(liquidity_penalty_multiplier_msat) / 2048;
                let amount_penalty_msat = negative_log10_times_2048
                        .saturating_mul(liquidity_penalty_amount_multiplier_msat)
                        .saturating_mul(amount_msat) / 2048 / AMOUNT_PENALTY_DIVISOR;
@@ -1122,17 +1069,20 @@ impl<L: Deref<Target = u64>, BRT: Deref<Target = HistoricalBucketRangeTracker>,
        }
 
        /// Returns the lower bound of the channel liquidity balance in this direction.
+       #[inline(always)]
        fn min_liquidity_msat(&self) -> u64 {
                self.decayed_offset_msat(*self.min_liquidity_offset_msat)
        }
 
        /// Returns the upper bound of the channel liquidity balance in this direction.
+       #[inline(always)]
        fn max_liquidity_msat(&self) -> u64 {
                self.available_capacity()
                        .saturating_sub(self.decayed_offset_msat(*self.max_liquidity_offset_msat))
        }
 
        /// Returns the capacity minus the in-flight HTLCs in this direction.
+       #[inline(always)]
        fn available_capacity(&self) -> u64 {
                self.capacity_msat.saturating_sub(self.inflight_htlc_msat)
        }
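The reworked `combined_penalty_msat` above clamps the log term itself, so the cap now applies to both the liquidity component and the amount component (previously only the liquidity penalty was bounded). A standalone sketch of the arithmetic, with the upper bound and amount divisor passed in explicitly because their concrete values are defined elsewhere in this file:

fn combined_penalty_sketch(
    amount_msat: u64, mut negative_log10_times_2048: u64, log10_upper_bound: u64,
    liquidity_multiplier_msat: u64, amount_multiplier_msat: u64, amount_penalty_divisor: u64,
) -> u64 {
    // Saturate the log term first; both penalty components then share the cap.
    negative_log10_times_2048 = negative_log10_times_2048.min(log10_upper_bound * 2048);
    let liquidity_penalty_msat =
        negative_log10_times_2048.saturating_mul(liquidity_multiplier_msat) / 2048;
    let amount_penalty_msat = negative_log10_times_2048
        .saturating_mul(amount_multiplier_msat)
        .saturating_mul(amount_msat) / 2048 / amount_penalty_divisor;
    liquidity_penalty_msat.saturating_add(amount_penalty_msat)
}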
@@ -1199,15 +1149,15 @@ impl<L: DerefMut<Target = u64>, BRT: DerefMut<Target = HistoricalBucketRangeTrac
                let half_lives = self.now.duration_since(*self.last_updated).as_secs()
                        .checked_div(self.decay_params.historical_no_updates_half_life.as_secs())
                        .map(|v| v.try_into().unwrap_or(u32::max_value())).unwrap_or(u32::max_value());
-               self.min_liquidity_offset_history.time_decay_data(half_lives);
-               self.max_liquidity_offset_history.time_decay_data(half_lives);
+               self.liquidity_history.min_liquidity_offset_history.time_decay_data(half_lives);
+               self.liquidity_history.max_liquidity_offset_history.time_decay_data(half_lives);
 
                let min_liquidity_offset_msat = self.decayed_offset_msat(*self.min_liquidity_offset_msat);
-               self.min_liquidity_offset_history.track_datapoint(
+               self.liquidity_history.min_liquidity_offset_history.track_datapoint(
                        min_liquidity_offset_msat, self.capacity_msat
                );
                let max_liquidity_offset_msat = self.decayed_offset_msat(*self.max_liquidity_offset_msat);
-               self.max_liquidity_offset_history.track_datapoint(
+               self.liquidity_history.max_liquidity_offset_history.track_datapoint(
                        max_liquidity_offset_msat, self.capacity_msat
                );
        }
@@ -1235,7 +1185,7 @@ impl<L: DerefMut<Target = u64>, BRT: DerefMut<Target = HistoricalBucketRangeTrac
        }
 }
 
-impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> Score for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
+impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ScoreLookUp for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
        type ScoreParams = ProbabilisticScoringFeeParameters;
        fn channel_penalty_msat(
                &self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &ProbabilisticScoringFeeParameters
@@ -1278,7 +1228,9 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> Score for Probabilis
                        .saturating_add(anti_probing_penalty_msat)
                        .saturating_add(base_penalty_msat)
        }
+}
 
+impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ScoreUpdate for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
        fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64) {
                let amount_msat = path.final_value_msat();
                log_trace!(self.logger, "Scoring path through to SCID {} as having failed at {} msat", short_channel_id, amount_msat);
@@ -1663,6 +1615,166 @@ mod approx {
        }
 }
 
+mod bucketed_history {
+       use super::*;
+
+       /// Tracks the historical state of a distribution as a weighted average of how much time was spent
+       /// in each of 8 buckets.
+       #[derive(Clone, Copy)]
+       pub(super) struct HistoricalBucketRangeTracker {
+               buckets: [u16; 8],
+       }
+
+       impl HistoricalBucketRangeTracker {
+               pub(super) fn new() -> Self { Self { buckets: [0; 8] } }
+               pub(super) fn track_datapoint(&mut self, liquidity_offset_msat: u64, capacity_msat: u64) {
+                       // We have 8 leaky buckets for min and max liquidity. Each bucket tracks the amount of time
+                       // we spend in each bucket as a 16-bit fixed-point number with a 5 bit fractional part.
+                       //
+                       // Each time we update our liquidity estimate, we add 32 (1.0 in our fixed-point system) to
+                       // the buckets for the current min and max liquidity offset positions.
+                       //
+                       // We then decay each bucket by multiplying by 2047/2048 (avoiding dividing by a
+                       // non-power-of-two). This ensures we can't actually overflow the u16 - when we get to
+                       // 63,457 adding 32 and decaying by 2047/2048 leaves us back at 63,457.
+                       //
+                       // In total, this allows us to track data for the last 8,000 or so payments across a given
+                       // channel.
+                       //
+                       // These constants are a balance - we try to fit in 2 bytes per bucket to reduce overhead,
+                       // and need to balance having more bits in the decimal part (to ensure decay isn't too
+                       // non-linear) with having too few bits in the mantissa, causing us to not store very many
+                       // datapoints.
+                       //
+                       // The constants were picked experimentally, selecting a decay amount that restricts us
+                       // from overflowing buckets without having to cap them manually.
+
+                       // Ensure the bucket index is in the range [0, 7], even if the liquidity offset is zero or
+                       // the channel's capacity, though the second should generally never happen.
+                       debug_assert!(liquidity_offset_msat <= capacity_msat);
+                       let bucket_idx: u8 = (liquidity_offset_msat * 8 / capacity_msat.saturating_add(1))
+                               .try_into().unwrap_or(32); // 32 is bogus for 8 buckets, and will be ignored
+                       debug_assert!(bucket_idx < 8);
+                       if bucket_idx < 8 {
+                               for e in self.buckets.iter_mut() {
+                                       *e = ((*e as u32) * 2047 / 2048) as u16;
+                               }
+                               self.buckets[bucket_idx as usize] = self.buckets[bucket_idx as usize].saturating_add(32);
+                       }
+               }
+               /// Decay all buckets by the given number of half-lives. Used to more aggressively remove old
+               /// datapoints as we receive newer information.
+               pub(super) fn time_decay_data(&mut self, half_lives: u32) {
+                       for e in self.buckets.iter_mut() {
+                               *e = e.checked_shr(half_lives).unwrap_or(0);
+                       }
+               }
+       }
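To make the fixed-point behaviour described in the comments concrete, a small crate-internal illustration (not part of the patch; the type is `pub(super)` so this only works inside the scoring module): repeatedly recording the same liquidity offset converges on a saturation value near 63k instead of overflowing the `u16` counters.

let mut tracker = HistoricalBucketRangeTracker::new();
for _ in 0..10_000 {
    // An offset of 0 msat on a 1_000_000 msat channel always lands in bucket 0:
    // each call decays every bucket by 2047/2048, then adds 32 to bucket 0.
    tracker.track_datapoint(0, 1_000_000);
}
// Bucket 0 is now pinned near its saturation point; all other buckets remain 0.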
+
+       impl_writeable_tlv_based!(HistoricalBucketRangeTracker, { (0, buckets, required) });
+
+       pub(super) struct HistoricalMinMaxBuckets<D: Deref<Target = HistoricalBucketRangeTracker>> {
+               pub(super) min_liquidity_offset_history: D,
+               pub(super) max_liquidity_offset_history: D,
+       }
+
+       impl<D: Deref<Target = HistoricalBucketRangeTracker>> HistoricalMinMaxBuckets<D> {
+               #[inline]
+               pub(super) fn get_decayed_buckets<T: Time>(&self, now: T, last_updated: T, half_life: Duration)
+               -> ([u16; 8], [u16; 8], u32) {
+                       let required_decays = now.duration_since(last_updated).as_secs()
+                               .checked_div(half_life.as_secs())
+                               .map_or(u32::max_value(), |decays| cmp::min(decays, u32::max_value() as u64) as u32);
+                       let mut min_buckets = *self.min_liquidity_offset_history;
+                       min_buckets.time_decay_data(required_decays);
+                       let mut max_buckets = *self.max_liquidity_offset_history;
+                       max_buckets.time_decay_data(required_decays);
+                       (min_buckets.buckets, max_buckets.buckets, required_decays)
+               }
+
+               #[inline]
+               pub(super) fn calculate_success_probability_times_billion<T: Time>(
+                       &self, now: T, last_updated: T, half_life: Duration, amount_msat: u64, capacity_msat: u64)
+               -> Option<u64> {
+                       // If historical penalties are enabled, calculate the penalty by walking the set of
+                       // historical liquidity bucket (min, max) combinations (where min_idx < max_idx) and, for
+                       // each, calculate the probability of success given our payment amount, then total the
+                       // weighted average probability of success.
+                       //
+                       // We use a sliding scale to decide which point within a given bucket will be compared to
+                       // the amount being sent - for lower-bounds, the amount being sent is compared to the lower
+                       // edge of the first bucket (i.e. zero), but compared to the upper 7/8ths of the last
+                       // bucket (i.e. 9 times the index, or 63), with each bucket in between increasing the
+                       // comparison point by 1/64th. For upper-bounds, the same applies, however with an offset
+                       // of 1/64th (i.e. starting at one and ending at 64). This avoids failing to assign
+                       // penalties to channels at the edges.
+                       //
+                       // If we used the bottom edge of buckets, we'd end up never assigning any penalty at all to
+                       // such a channel when sending less than ~0.19% of the channel's capacity (e.g. ~200k sats
+                       // for a 1 BTC channel!).
+                       //
+                       // If we used the middle of each bucket we'd never assign any penalty at all when sending
+                       // less than 1/16th of a channel's capacity, or 1/8th if we used the top of the bucket.
+                       let mut total_valid_points_tracked = 0;
+
+                       let payment_amt_64th_bucket: u8 = if amount_msat < u64::max_value() / 64 {
+                               (amount_msat * 64 / capacity_msat.saturating_add(1))
+                                       .try_into().unwrap_or(65)
+                       } else {
+                               // Only use 128-bit arithmetic when multiplication will overflow to avoid 128-bit
+                               // division. This branch should only be hit in fuzz testing since the amount would
+                               // need to be over 2.88 million BTC in practice.
+                               ((amount_msat as u128) * 64 / (capacity_msat as u128).saturating_add(1))
+                                       .try_into().unwrap_or(65)
+                       };
+                       #[cfg(not(fuzzing))]
+                       debug_assert!(payment_amt_64th_bucket <= 64);
+                       if payment_amt_64th_bucket >= 64 { return None; }
+
+                       // Check if all our buckets are zero, once decayed and treat it as if we had no data. We
+                       // don't actually use the decayed buckets, though, as that would lose precision.
+                       let (decayed_min_buckets, decayed_max_buckets, required_decays) =
+                               self.get_decayed_buckets(now, last_updated, half_life);
+                       if decayed_min_buckets.iter().all(|v| *v == 0) || decayed_max_buckets.iter().all(|v| *v == 0) {
+                               return None;
+                       }
+
+                       for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
+                               for max_bucket in self.max_liquidity_offset_history.buckets.iter().take(8 - min_idx) {
+                                       total_valid_points_tracked += (*min_bucket as u64) * (*max_bucket as u64);
+                               }
+                       }
+                       // If the total valid points is smaller than 1.0 (i.e. 32 in our fixed-point scheme), treat
+                       // it as if we were fully decayed.
+                       if total_valid_points_tracked.checked_shr(required_decays).unwrap_or(0) < 32*32 {
+                               return None;
+                       }
+
+                       let mut cumulative_success_prob_times_billion = 0;
+                       for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() {
+                               for (max_idx, max_bucket) in self.max_liquidity_offset_history.buckets.iter().enumerate().take(8 - min_idx) {
+                                       let bucket_prob_times_million = (*min_bucket as u64) * (*max_bucket as u64)
+                                               * 1024 * 1024 / total_valid_points_tracked;
+                                       let min_64th_bucket = min_idx as u8 * 9;
+                                       let max_64th_bucket = (7 - max_idx as u8) * 9 + 1;
+                                       if payment_amt_64th_bucket > max_64th_bucket {
+                                               // Success probability 0, the payment amount is above the max liquidity
+                                       } else if payment_amt_64th_bucket <= min_64th_bucket {
+                                               cumulative_success_prob_times_billion += bucket_prob_times_million * 1024;
+                                       } else {
+                                               cumulative_success_prob_times_billion += bucket_prob_times_million *
+                                                       ((max_64th_bucket - payment_amt_64th_bucket) as u64) * 1024 /
+                                                       ((max_64th_bucket - min_64th_bucket) as u64);
+                                       }
+                               }
+                       }
+
+                       Some(cumulative_success_prob_times_billion)
+               }
+       }
+}
+use bucketed_history::{HistoricalBucketRangeTracker, HistoricalMinMaxBuckets};
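As a worked example of the 64ths mapping used by `calculate_success_probability_times_billion` (numbers chosen for illustration): a 200_000 msat payment over a channel with 1_000_000 msat of capacity lands in 64th-bucket 200_000 * 64 / 1_000_001 = 12. If all of the tracked history sits in the (min_idx = 0, max_idx = 0) bucket pair, the comparison points are min_64th = 0 * 9 = 0 and max_64th = (7 - 0) * 9 + 1 = 64, so that pair contributes an interpolated success probability of (64 - 12) / 64 = 81.25% of its weight.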
+
 impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> Writeable for ProbabilisticScorerUsingTime<G, L, T> where L::Target: Logger {
        #[inline]
        fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
@@ -1757,7 +1869,7 @@ mod tests {
        use crate::ln::msgs::{ChannelAnnouncement, ChannelUpdate, UnsignedChannelAnnouncement, UnsignedChannelUpdate};
        use crate::routing::gossip::{EffectiveCapacity, NetworkGraph, NodeId};
        use crate::routing::router::{BlindedTail, Path, RouteHop};
-       use crate::routing::scoring::{ChannelUsage, Score};
+       use crate::routing::scoring::{ChannelUsage, ScoreLookUp, ScoreUpdate};
        use crate::util::ser::{ReadableArgs, Writeable};
        use crate::util::test_utils::{self, TestLogger};
 
@@ -2856,6 +2968,8 @@ mod tests {
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 47);
                assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
                        None);
+               assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, 42),
+                       None);
 
                scorer.payment_path_failed(&payment_path_for_amount(1), 42);
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 2048);
@@ -2863,6 +2977,10 @@ mod tests {
                // octile.
                assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
                        Some(([32, 0, 0, 0, 0, 0, 0, 0], [32, 0, 0, 0, 0, 0, 0, 0])));
+               assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, 1),
+                       Some(1.0));
+               assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, 500),
+                       Some(0.0));
 
                // Even after we tell the scorer we definitely have enough available liquidity, it will
                // still remember that there was some failure in the past, and assign a non-0 penalty.
@@ -2872,6 +2990,17 @@ mod tests {
                assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
                        Some(([31, 0, 0, 0, 0, 0, 0, 32], [31, 0, 0, 0, 0, 0, 0, 32])));
 
+               // The exact success probability is a bit complicated and involves integer rounding, so we
+               // simply check bounds here.
+               let five_hundred_prob =
+                       scorer.historical_estimated_payment_success_probability(42, &target, 500).unwrap();
+               assert!(five_hundred_prob > 0.5);
+               assert!(five_hundred_prob < 0.52);
+               let one_prob =
+                       scorer.historical_estimated_payment_success_probability(42, &target, 1).unwrap();
+               assert!(one_prob < 1.0);
+               assert!(one_prob > 0.99);
+
                // Advance the time forward 16 half-lives (which the docs claim will ensure all data is
                // gone), and check that we're back to where we started.
                SinceEpoch::advance(Duration::from_secs(10 * 16));
@@ -2880,13 +3009,16 @@ mod tests {
                // data entirely instead.
                assert_eq!(scorer.historical_estimated_channel_liquidity_probabilities(42, &target),
                        Some(([0; 8], [0; 8])));
+               assert_eq!(scorer.historical_estimated_payment_success_probability(42, &target, 1), None);
 
-               let usage = ChannelUsage {
+               let mut usage = ChannelUsage {
                        amount_msat: 100,
                        inflight_htlc_msat: 1024,
                        effective_capacity: EffectiveCapacity::Total { capacity_msat: 1_024, htlc_maximum_msat: 1_024 },
                };
                scorer.payment_path_failed(&payment_path_for_amount(1), 42);
+               assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 2048);
+               usage.inflight_htlc_msat = 0;
                assert_eq!(scorer.channel_penalty_msat(42, &source, &target, usage, &params), 409);
 
                let usage = ChannelUsage {
diff --git a/lightning/src/util/enforcing_trait_impls.rs b/lightning/src/util/enforcing_trait_impls.rs
deleted file mode 100644 (file)
index df0f13b..0000000
+++ /dev/null
@@ -1,297 +0,0 @@
-// This file is Copyright its original authors, visible in version control
-// history.
-//
-// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
-// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
-// You may not use this file except in accordance with one or both of these
-// licenses.
-
-use crate::ln::channel::{ANCHOR_OUTPUT_VALUE_SATOSHI, MIN_CHAN_DUST_LIMIT_SATOSHIS};
-use crate::ln::chan_utils::{HTLCOutputInCommitment, ChannelPublicKeys, HolderCommitmentTransaction, CommitmentTransaction, ChannelTransactionParameters, TrustedCommitmentTransaction, ClosingTransaction};
-use crate::ln::{chan_utils, msgs, PaymentPreimage};
-use crate::sign::{WriteableEcdsaChannelSigner, InMemorySigner, ChannelSigner, EcdsaChannelSigner};
-
-use crate::prelude::*;
-use core::cmp;
-use crate::sync::{Mutex, Arc};
-#[cfg(test)] use crate::sync::MutexGuard;
-
-use bitcoin::blockdata::transaction::{Transaction, EcdsaSighashType};
-use bitcoin::util::sighash;
-
-use bitcoin::secp256k1;
-use bitcoin::secp256k1::{SecretKey, PublicKey};
-use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
-use crate::events::bump_transaction::HTLCDescriptor;
-use crate::util::ser::{Writeable, Writer};
-use crate::io::Error;
-use crate::ln::features::ChannelTypeFeatures;
-
-/// Initial value for revoked commitment downward counter
-pub const INITIAL_REVOKED_COMMITMENT_NUMBER: u64 = 1 << 48;
-
-/// An implementation of Sign that enforces some policy checks.  The current checks
-/// are an incomplete set.  They include:
-///
-/// - When signing, the holder transaction has not been revoked
-/// - When revoking, the holder transaction has not been signed
-/// - The holder commitment number is monotonic and without gaps
-/// - The revoked holder commitment number is monotonic and without gaps
-/// - There is at least one unrevoked holder transaction at all times
-/// - The counterparty commitment number is monotonic and without gaps
-/// - The pre-derived keys and pre-built transaction in CommitmentTransaction were correctly built
-///
-/// Eventually we will probably want to expose a variant of this which would essentially
-/// be what you'd want to run on a hardware wallet.
-///
-/// Note that counterparty signatures on the holder transaction are not checked, but it should
-/// be in a complete implementation.
-///
-/// Note that before we do so we should ensure its serialization format has backwards- and
-/// forwards-compatibility prefix/suffixes!
-#[derive(Clone)]
-pub struct EnforcingSigner {
-       pub inner: InMemorySigner,
-       /// Channel state used for policy enforcement
-       pub state: Arc<Mutex<EnforcementState>>,
-       pub disable_revocation_policy_check: bool,
-}
-
-impl PartialEq for EnforcingSigner {
-       fn eq(&self, o: &Self) -> bool {
-               Arc::ptr_eq(&self.state, &o.state)
-       }
-}
-
-impl EnforcingSigner {
-       /// Construct an EnforcingSigner
-       pub fn new(inner: InMemorySigner) -> Self {
-               let state = Arc::new(Mutex::new(EnforcementState::new()));
-               Self {
-                       inner,
-                       state,
-                       disable_revocation_policy_check: false
-               }
-       }
-
-       /// Construct an EnforcingSigner with externally managed storage
-       ///
-       /// Since there are multiple copies of this struct for each channel, some coordination is needed
-       /// so that all copies are aware of enforcement state.  A pointer to this state is provided
-       /// here, usually by an implementation of KeysInterface.
-       pub fn new_with_revoked(inner: InMemorySigner, state: Arc<Mutex<EnforcementState>>, disable_revocation_policy_check: bool) -> Self {
-               Self {
-                       inner,
-                       state,
-                       disable_revocation_policy_check
-               }
-       }
-
-       pub fn channel_type_features(&self) -> &ChannelTypeFeatures { self.inner.channel_type_features() }
-
-       #[cfg(test)]
-       pub fn get_enforcement_state(&self) -> MutexGuard<EnforcementState> {
-               self.state.lock().unwrap()
-       }
-}
-
-impl ChannelSigner for EnforcingSigner {
-       fn get_per_commitment_point(&self, idx: u64, secp_ctx: &Secp256k1<secp256k1::All>) -> PublicKey {
-               self.inner.get_per_commitment_point(idx, secp_ctx)
-       }
-
-       fn release_commitment_secret(&self, idx: u64) -> [u8; 32] {
-               {
-                       let mut state = self.state.lock().unwrap();
-                       assert!(idx == state.last_holder_revoked_commitment || idx == state.last_holder_revoked_commitment - 1, "can only revoke the current or next unrevoked commitment - trying {}, last revoked {}", idx, state.last_holder_revoked_commitment);
-                       assert!(idx > state.last_holder_commitment, "cannot revoke the last holder commitment - attempted to revoke {} last commitment {}", idx, state.last_holder_commitment);
-                       state.last_holder_revoked_commitment = idx;
-               }
-               self.inner.release_commitment_secret(idx)
-       }
-
-       fn validate_holder_commitment(&self, holder_tx: &HolderCommitmentTransaction, _preimages: Vec<PaymentPreimage>) -> Result<(), ()> {
-               let mut state = self.state.lock().unwrap();
-               let idx = holder_tx.commitment_number();
-               assert!(idx == state.last_holder_commitment || idx == state.last_holder_commitment - 1, "expecting to validate the current or next holder commitment - trying {}, current {}", idx, state.last_holder_commitment);
-               state.last_holder_commitment = idx;
-               Ok(())
-       }
-
-       fn pubkeys(&self) -> &ChannelPublicKeys { self.inner.pubkeys() }
-
-       fn channel_keys_id(&self) -> [u8; 32] { self.inner.channel_keys_id() }
-
-       fn provide_channel_parameters(&mut self, channel_parameters: &ChannelTransactionParameters) {
-               self.inner.provide_channel_parameters(channel_parameters)
-       }
-}
-
-impl EcdsaChannelSigner for EnforcingSigner {
-       fn sign_counterparty_commitment(&self, commitment_tx: &CommitmentTransaction, preimages: Vec<PaymentPreimage>, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<(Signature, Vec<Signature>), ()> {
-               self.verify_counterparty_commitment_tx(commitment_tx, secp_ctx);
-
-               {
-                       let mut state = self.state.lock().unwrap();
-                       let actual_commitment_number = commitment_tx.commitment_number();
-                       let last_commitment_number = state.last_counterparty_commitment;
-                       // These commitment numbers are backwards counting.  We expect either the same as the previously encountered,
-                       // or the next one.
-                       assert!(last_commitment_number == actual_commitment_number || last_commitment_number - 1 == actual_commitment_number, "{} doesn't come after {}", actual_commitment_number, last_commitment_number);
-                       // Ensure that the counterparty doesn't get more than two broadcastable commitments -
-                       // the last and the one we are trying to sign
-                       assert!(actual_commitment_number >= state.last_counterparty_revoked_commitment - 2, "cannot sign a commitment if second to last wasn't revoked - signing {} revoked {}", actual_commitment_number, state.last_counterparty_revoked_commitment);
-                       state.last_counterparty_commitment = cmp::min(last_commitment_number, actual_commitment_number)
-               }
-
-               Ok(self.inner.sign_counterparty_commitment(commitment_tx, preimages, secp_ctx).unwrap())
-       }
-
-       fn validate_counterparty_revocation(&self, idx: u64, _secret: &SecretKey) -> Result<(), ()> {
-               let mut state = self.state.lock().unwrap();
-               assert!(idx == state.last_counterparty_revoked_commitment || idx == state.last_counterparty_revoked_commitment - 1, "expecting to validate the current or next counterparty revocation - trying {}, current {}", idx, state.last_counterparty_revoked_commitment);
-               state.last_counterparty_revoked_commitment = idx;
-               Ok(())
-       }
-
-       fn sign_holder_commitment_and_htlcs(&self, commitment_tx: &HolderCommitmentTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<(Signature, Vec<Signature>), ()> {
-               let trusted_tx = self.verify_holder_commitment_tx(commitment_tx, secp_ctx);
-               let commitment_txid = trusted_tx.txid();
-               let holder_csv = self.inner.counterparty_selected_contest_delay();
-
-               let state = self.state.lock().unwrap();
-               let commitment_number = trusted_tx.commitment_number();
-               if state.last_holder_revoked_commitment - 1 != commitment_number && state.last_holder_revoked_commitment - 2 != commitment_number {
-                       if !self.disable_revocation_policy_check {
-                               panic!("can only sign the next two unrevoked commitment numbers, revoked={} vs requested={} for {}",
-                                      state.last_holder_revoked_commitment, commitment_number, self.inner.commitment_seed[0])
-                       }
-               }
-
-               for (this_htlc, sig) in trusted_tx.htlcs().iter().zip(&commitment_tx.counterparty_htlc_sigs) {
-                       assert!(this_htlc.transaction_output_index.is_some());
-                       let keys = trusted_tx.keys();
-                       let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, trusted_tx.feerate_per_kw(), holder_csv, &this_htlc, self.channel_type_features(), &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
-
-                       let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&this_htlc, self.channel_type_features(), &keys);
-
-                       let sighash_type = if self.channel_type_features().supports_anchors_zero_fee_htlc_tx() {
-                               EcdsaSighashType::SinglePlusAnyoneCanPay
-                       } else {
-                               EcdsaSighashType::All
-                       };
-                       let sighash = hash_to_message!(
-                               &sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(
-                                       0, &htlc_redeemscript, this_htlc.amount_msat / 1000, sighash_type,
-                               ).unwrap()[..]
-                       );
-                       secp_ctx.verify_ecdsa(&sighash, sig, &keys.countersignatory_htlc_key).unwrap();
-               }
-
-               Ok(self.inner.sign_holder_commitment_and_htlcs(commitment_tx, secp_ctx).unwrap())
-       }
-
-       #[cfg(any(test,feature = "unsafe_revoked_tx_signing"))]
-       fn unsafe_sign_holder_commitment_and_htlcs(&self, commitment_tx: &HolderCommitmentTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<(Signature, Vec<Signature>), ()> {
-               Ok(self.inner.unsafe_sign_holder_commitment_and_htlcs(commitment_tx, secp_ctx).unwrap())
-       }
-
-       fn sign_justice_revoked_output(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
-               Ok(self.inner.sign_justice_revoked_output(justice_tx, input, amount, per_commitment_key, secp_ctx).unwrap())
-       }
-
-       fn sign_justice_revoked_htlc(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
-               Ok(self.inner.sign_justice_revoked_htlc(justice_tx, input, amount, per_commitment_key, htlc, secp_ctx).unwrap())
-       }
-
-       fn sign_holder_htlc_transaction(
-               &self, htlc_tx: &Transaction, input: usize, htlc_descriptor: &HTLCDescriptor,
-               secp_ctx: &Secp256k1<secp256k1::All>
-       ) -> Result<Signature, ()> {
-               assert_eq!(htlc_tx.input[input], htlc_descriptor.unsigned_tx_input());
-               assert_eq!(htlc_tx.output[input], htlc_descriptor.tx_output(secp_ctx));
-               Ok(self.inner.sign_holder_htlc_transaction(htlc_tx, input, htlc_descriptor, secp_ctx).unwrap())
-       }
-
-       fn sign_counterparty_htlc_transaction(&self, htlc_tx: &Transaction, input: usize, amount: u64, per_commitment_point: &PublicKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
-               Ok(self.inner.sign_counterparty_htlc_transaction(htlc_tx, input, amount, per_commitment_point, htlc, secp_ctx).unwrap())
-       }
-
-       fn sign_closing_transaction(&self, closing_tx: &ClosingTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
-               closing_tx.verify(self.inner.funding_outpoint().into_bitcoin_outpoint())
-                       .expect("derived different closing transaction");
-               Ok(self.inner.sign_closing_transaction(closing_tx, secp_ctx).unwrap())
-       }
-
-       fn sign_holder_anchor_input(
-               &self, anchor_tx: &Transaction, input: usize, secp_ctx: &Secp256k1<secp256k1::All>,
-       ) -> Result<Signature, ()> {
-               debug_assert!(MIN_CHAN_DUST_LIMIT_SATOSHIS > ANCHOR_OUTPUT_VALUE_SATOSHI);
-               // As long as our minimum dust limit is enforced and is greater than our anchor output
-               // value, an anchor output can only have an index within [0, 1].
-               assert!(anchor_tx.input[input].previous_output.vout == 0 || anchor_tx.input[input].previous_output.vout == 1);
-               self.inner.sign_holder_anchor_input(anchor_tx, input, secp_ctx)
-       }
-
-       fn sign_channel_announcement_with_funding_key(
-               &self, msg: &msgs::UnsignedChannelAnnouncement, secp_ctx: &Secp256k1<secp256k1::All>
-       ) -> Result<Signature, ()> {
-               self.inner.sign_channel_announcement_with_funding_key(msg, secp_ctx)
-       }
-}
-
-impl WriteableEcdsaChannelSigner for EnforcingSigner {}
-
-impl Writeable for EnforcingSigner {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
-               // EnforcingSigner has two fields - `inner` ([`InMemorySigner`]) and `state`
-               // ([`EnforcementState`]). `inner` is serialized here and deserialized by
-               // [`SignerProvider::read_chan_signer`]. `state` is managed by [`SignerProvider`]
-               // and will be serialized as needed by the implementation of that trait.
-               self.inner.write(writer)?;
-               Ok(())
-       }
-}
-
-impl EnforcingSigner {
-       fn verify_counterparty_commitment_tx<'a, T: secp256k1::Signing + secp256k1::Verification>(&self, commitment_tx: &'a CommitmentTransaction, secp_ctx: &Secp256k1<T>) -> TrustedCommitmentTransaction<'a> {
-               commitment_tx.verify(&self.inner.get_channel_parameters().as_counterparty_broadcastable(),
-                                    self.inner.counterparty_pubkeys(), self.inner.pubkeys(), secp_ctx)
-                       .expect("derived different per-tx keys or built transaction")
-       }
-
-       fn verify_holder_commitment_tx<'a, T: secp256k1::Signing + secp256k1::Verification>(&self, commitment_tx: &'a CommitmentTransaction, secp_ctx: &Secp256k1<T>) -> TrustedCommitmentTransaction<'a> {
-               commitment_tx.verify(&self.inner.get_channel_parameters().as_holder_broadcastable(),
-                                    self.inner.pubkeys(), self.inner.counterparty_pubkeys(), secp_ctx)
-                       .expect("derived different per-tx keys or built transaction")
-       }
-}
-
-/// The state used by [`EnforcingSigner`] in order to enforce policy checks
-///
-/// This structure is maintained by KeysInterface since we may have multiple copies of
-/// the signer and they must coordinate their state.
-#[derive(Clone)]
-pub struct EnforcementState {
-       /// The last counterparty commitment number we signed, backwards counting
-       pub last_counterparty_commitment: u64,
-       /// The last counterparty commitment they revoked, backwards counting
-       pub last_counterparty_revoked_commitment: u64,
-       /// The last holder commitment number we revoked, backwards counting
-       pub last_holder_revoked_commitment: u64,
-       /// The last validated holder commitment number, backwards counting
-       pub last_holder_commitment: u64,
-}
-
-impl EnforcementState {
-       /// Enforcement state for a new channel
-       pub fn new() -> Self {
-               EnforcementState {
-                       last_counterparty_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER,
-                       last_counterparty_revoked_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER,
-                       last_holder_revoked_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER,
-                       last_holder_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER,
-               }
-       }
-}
index e79980370342ff01920fe5a37b4423719a228ac7..4836b4d6814f705cb111515a983733c4566e3a34 100644 (file)
@@ -15,7 +15,6 @@ use bitcoin::blockdata::transaction::Transaction;
 
 use crate::routing::router::Route;
 use crate::ln::chan_utils::HTLCClaim;
-use crate::util::logger::DebugBytes;
 
 macro_rules! log_iter {
        ($obj: expr) => {
@@ -42,10 +41,7 @@ macro_rules! log_bytes {
 pub(crate) struct DebugFundingChannelId<'a>(pub &'a Txid, pub u16);
 impl<'a> core::fmt::Display for DebugFundingChannelId<'a> {
        fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> {
-               for i in (OutPoint { txid: self.0.clone(), index: self.1 }).to_channel_id().iter() {
-                       write!(f, "{:02x}", i)?;
-               }
-               Ok(())
+               (OutPoint { txid: self.0.clone(), index: self.1 }).to_channel_id().fmt(f)
        }
 }
 macro_rules! log_funding_channel_id {
@@ -57,7 +53,7 @@ macro_rules! log_funding_channel_id {
 pub(crate) struct DebugFundingInfo<'a, T: 'a>(pub &'a (OutPoint, T));
 impl<'a, T> core::fmt::Display for DebugFundingInfo<'a, T> {
        fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> {
-               DebugBytes(&(self.0).0.to_channel_id()[..]).fmt(f)
+               (self.0).0.to_channel_id().fmt(f)
        }
 }
 macro_rules! log_funding_info {
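
The two hunks above drop the hand-rolled per-byte hex loop in favour of calling `fmt` on the value returned by `to_channel_id()`, which relies on the new `ChannelId` type implementing `core::fmt::Display` as lowercase hex. A minimal standalone sketch of that pattern follows; the `ChanId` newtype is hypothetical and only illustrates the formatting idiom, it is not the crate's type:

    use core::fmt;

    // Hypothetical stand-in for a 32-byte channel id newtype.
    struct ChanId([u8; 32]);

    impl fmt::Display for ChanId {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            // Emit each byte as two lowercase hex digits, which is what the
            // removed manual loop did at each call site.
            for byte in self.0.iter() {
                write!(f, "{:02x}", byte)?;
            }
            Ok(())
        }
    }

    fn main() {
        let id = ChanId([0xab; 32]);
        // Call sites can now defer to Display instead of formatting bytes themselves.
        println!("{}", id);
    }
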
index dd9d2744e1c7927ba82bab9244a355020b5d9660..cc1b5f581afb233fd1ee5e6c265d9d5f9ea492ae 100644 (file)
@@ -56,5 +56,5 @@ pub mod test_utils;
 /// impls of traits that add extra enforcement on the way they're called. Useful for detecting state
 /// machine errors and used in fuzz targets and tests.
 #[cfg(any(test, feature = "_test_utils"))]
-pub mod enforcing_trait_impls;
+pub mod test_channel_signer;
 
diff --git a/lightning/src/util/test_channel_signer.rs b/lightning/src/util/test_channel_signer.rs
new file mode 100644 (file)
index 0000000..2fb1c49
--- /dev/null
@@ -0,0 +1,297 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+use crate::ln::channel::{ANCHOR_OUTPUT_VALUE_SATOSHI, MIN_CHAN_DUST_LIMIT_SATOSHIS};
+use crate::ln::chan_utils::{HTLCOutputInCommitment, ChannelPublicKeys, HolderCommitmentTransaction, CommitmentTransaction, ChannelTransactionParameters, TrustedCommitmentTransaction, ClosingTransaction};
+use crate::ln::{chan_utils, msgs, PaymentPreimage};
+use crate::sign::{WriteableEcdsaChannelSigner, InMemorySigner, ChannelSigner, EcdsaChannelSigner};
+
+use crate::prelude::*;
+use core::cmp;
+use crate::sync::{Mutex, Arc};
+#[cfg(test)] use crate::sync::MutexGuard;
+
+use bitcoin::blockdata::transaction::{Transaction, EcdsaSighashType};
+use bitcoin::util::sighash;
+
+use bitcoin::secp256k1;
+use bitcoin::secp256k1::{SecretKey, PublicKey};
+use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
+use crate::events::bump_transaction::HTLCDescriptor;
+use crate::util::ser::{Writeable, Writer};
+use crate::io::Error;
+use crate::ln::features::ChannelTypeFeatures;
+
+/// Initial value for revoked commitment downward counter
+pub const INITIAL_REVOKED_COMMITMENT_NUMBER: u64 = 1 << 48;
+
+/// An implementation of Sign that enforces some policy checks.  The current checks
+/// are an incomplete set.  They include:
+///
+/// - When signing, the holder transaction has not been revoked
+/// - When revoking, the holder transaction has not been signed
+/// - The holder commitment number is monotonic and without gaps
+/// - The revoked holder commitment number is monotonic and without gaps
+/// - There is at least one unrevoked holder transaction at all times
+/// - The counterparty commitment number is monotonic and without gaps
+/// - The pre-derived keys and pre-built transaction in CommitmentTransaction were correctly built
+///
+/// Eventually we will probably want to expose a variant of this which would essentially
+/// be what you'd want to run on a hardware wallet.
+///
+/// Note that counterparty signatures on the holder transaction are not checked here, but they
+/// should be checked in a complete implementation.
+///
+/// Note that before we do so we should ensure its serialization format has backwards- and
+/// forwards-compatibility prefix/suffixes!
+#[derive(Clone)]
+pub struct TestChannelSigner {
+       pub inner: InMemorySigner,
+       /// Channel state used for policy enforcement
+       pub state: Arc<Mutex<EnforcementState>>,
+       pub disable_revocation_policy_check: bool,
+}
+
+impl PartialEq for TestChannelSigner {
+       fn eq(&self, o: &Self) -> bool {
+               Arc::ptr_eq(&self.state, &o.state)
+       }
+}
+
+impl TestChannelSigner {
+       /// Construct a TestChannelSigner
+       pub fn new(inner: InMemorySigner) -> Self {
+               let state = Arc::new(Mutex::new(EnforcementState::new()));
+               Self {
+                       inner,
+                       state,
+                       disable_revocation_policy_check: false
+               }
+       }
+
+       /// Construct a TestChannelSigner with externally managed storage
+       ///
+       /// Since there are multiple copies of this struct for each channel, some coordination is needed
+       /// so that all copies are aware of enforcement state.  A pointer to this state is provided
+       /// here, usually by an implementation of KeysInterface.
+       pub fn new_with_revoked(inner: InMemorySigner, state: Arc<Mutex<EnforcementState>>, disable_revocation_policy_check: bool) -> Self {
+               Self {
+                       inner,
+                       state,
+                       disable_revocation_policy_check
+               }
+       }
+
+       pub fn channel_type_features(&self) -> &ChannelTypeFeatures { self.inner.channel_type_features() }
+
+       #[cfg(test)]
+       pub fn get_enforcement_state(&self) -> MutexGuard<EnforcementState> {
+               self.state.lock().unwrap()
+       }
+}
+
+impl ChannelSigner for TestChannelSigner {
+       fn get_per_commitment_point(&self, idx: u64, secp_ctx: &Secp256k1<secp256k1::All>) -> PublicKey {
+               self.inner.get_per_commitment_point(idx, secp_ctx)
+       }
+
+       fn release_commitment_secret(&self, idx: u64) -> [u8; 32] {
+               {
+                       let mut state = self.state.lock().unwrap();
+                       assert!(idx == state.last_holder_revoked_commitment || idx == state.last_holder_revoked_commitment - 1, "can only revoke the current or next unrevoked commitment - trying {}, last revoked {}", idx, state.last_holder_revoked_commitment);
+                       assert!(idx > state.last_holder_commitment, "cannot revoke the last holder commitment - attempted to revoke {} last commitment {}", idx, state.last_holder_commitment);
+                       state.last_holder_revoked_commitment = idx;
+               }
+               self.inner.release_commitment_secret(idx)
+       }
+
+       fn validate_holder_commitment(&self, holder_tx: &HolderCommitmentTransaction, _preimages: Vec<PaymentPreimage>) -> Result<(), ()> {
+               let mut state = self.state.lock().unwrap();
+               let idx = holder_tx.commitment_number();
+               assert!(idx == state.last_holder_commitment || idx == state.last_holder_commitment - 1, "expecting to validate the current or next holder commitment - trying {}, current {}", idx, state.last_holder_commitment);
+               state.last_holder_commitment = idx;
+               Ok(())
+       }
+
+       fn pubkeys(&self) -> &ChannelPublicKeys { self.inner.pubkeys() }
+
+       fn channel_keys_id(&self) -> [u8; 32] { self.inner.channel_keys_id() }
+
+       fn provide_channel_parameters(&mut self, channel_parameters: &ChannelTransactionParameters) {
+               self.inner.provide_channel_parameters(channel_parameters)
+       }
+}
+
+impl EcdsaChannelSigner for TestChannelSigner {
+       fn sign_counterparty_commitment(&self, commitment_tx: &CommitmentTransaction, preimages: Vec<PaymentPreimage>, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<(Signature, Vec<Signature>), ()> {
+               self.verify_counterparty_commitment_tx(commitment_tx, secp_ctx);
+
+               {
+                       let mut state = self.state.lock().unwrap();
+                       let actual_commitment_number = commitment_tx.commitment_number();
+                       let last_commitment_number = state.last_counterparty_commitment;
+                       // These commitment numbers are backwards counting.  We expect either the same as the previously encountered,
+                       // or the next one.
+                       assert!(last_commitment_number == actual_commitment_number || last_commitment_number - 1 == actual_commitment_number, "{} doesn't come after {}", actual_commitment_number, last_commitment_number);
+                       // Ensure that the counterparty doesn't get more than two broadcastable commitments -
+                       // the last and the one we are trying to sign
+                       assert!(actual_commitment_number >= state.last_counterparty_revoked_commitment - 2, "cannot sign a commitment if second to last wasn't revoked - signing {} revoked {}", actual_commitment_number, state.last_counterparty_revoked_commitment);
+                       state.last_counterparty_commitment = cmp::min(last_commitment_number, actual_commitment_number)
+               }
+
+               Ok(self.inner.sign_counterparty_commitment(commitment_tx, preimages, secp_ctx).unwrap())
+       }
+
+       fn validate_counterparty_revocation(&self, idx: u64, _secret: &SecretKey) -> Result<(), ()> {
+               let mut state = self.state.lock().unwrap();
+               assert!(idx == state.last_counterparty_revoked_commitment || idx == state.last_counterparty_revoked_commitment - 1, "expecting to validate the current or next counterparty revocation - trying {}, current {}", idx, state.last_counterparty_revoked_commitment);
+               state.last_counterparty_revoked_commitment = idx;
+               Ok(())
+       }
+
+       fn sign_holder_commitment_and_htlcs(&self, commitment_tx: &HolderCommitmentTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<(Signature, Vec<Signature>), ()> {
+               let trusted_tx = self.verify_holder_commitment_tx(commitment_tx, secp_ctx);
+               let commitment_txid = trusted_tx.txid();
+               let holder_csv = self.inner.counterparty_selected_contest_delay();
+
+               let state = self.state.lock().unwrap();
+               let commitment_number = trusted_tx.commitment_number();
+               if state.last_holder_revoked_commitment - 1 != commitment_number && state.last_holder_revoked_commitment - 2 != commitment_number {
+                       if !self.disable_revocation_policy_check {
+                               panic!("can only sign the next two unrevoked commitment numbers, revoked={} vs requested={} for {}",
+                                      state.last_holder_revoked_commitment, commitment_number, self.inner.commitment_seed[0])
+                       }
+               }
+
+               for (this_htlc, sig) in trusted_tx.htlcs().iter().zip(&commitment_tx.counterparty_htlc_sigs) {
+                       assert!(this_htlc.transaction_output_index.is_some());
+                       let keys = trusted_tx.keys();
+                       let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, trusted_tx.feerate_per_kw(), holder_csv, &this_htlc, self.channel_type_features(), &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
+
+                       let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&this_htlc, self.channel_type_features(), &keys);
+
+                       let sighash_type = if self.channel_type_features().supports_anchors_zero_fee_htlc_tx() {
+                               EcdsaSighashType::SinglePlusAnyoneCanPay
+                       } else {
+                               EcdsaSighashType::All
+                       };
+                       let sighash = hash_to_message!(
+                               &sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(
+                                       0, &htlc_redeemscript, this_htlc.amount_msat / 1000, sighash_type,
+                               ).unwrap()[..]
+                       );
+                       secp_ctx.verify_ecdsa(&sighash, sig, &keys.countersignatory_htlc_key).unwrap();
+               }
+
+               Ok(self.inner.sign_holder_commitment_and_htlcs(commitment_tx, secp_ctx).unwrap())
+       }
+
+       #[cfg(any(test,feature = "unsafe_revoked_tx_signing"))]
+       fn unsafe_sign_holder_commitment_and_htlcs(&self, commitment_tx: &HolderCommitmentTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<(Signature, Vec<Signature>), ()> {
+               Ok(self.inner.unsafe_sign_holder_commitment_and_htlcs(commitment_tx, secp_ctx).unwrap())
+       }
+
+       fn sign_justice_revoked_output(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
+               Ok(self.inner.sign_justice_revoked_output(justice_tx, input, amount, per_commitment_key, secp_ctx).unwrap())
+       }
+
+       fn sign_justice_revoked_htlc(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
+               Ok(self.inner.sign_justice_revoked_htlc(justice_tx, input, amount, per_commitment_key, htlc, secp_ctx).unwrap())
+       }
+
+       fn sign_holder_htlc_transaction(
+               &self, htlc_tx: &Transaction, input: usize, htlc_descriptor: &HTLCDescriptor,
+               secp_ctx: &Secp256k1<secp256k1::All>
+       ) -> Result<Signature, ()> {
+               assert_eq!(htlc_tx.input[input], htlc_descriptor.unsigned_tx_input());
+               assert_eq!(htlc_tx.output[input], htlc_descriptor.tx_output(secp_ctx));
+               Ok(self.inner.sign_holder_htlc_transaction(htlc_tx, input, htlc_descriptor, secp_ctx).unwrap())
+       }
+
+       fn sign_counterparty_htlc_transaction(&self, htlc_tx: &Transaction, input: usize, amount: u64, per_commitment_point: &PublicKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
+               Ok(self.inner.sign_counterparty_htlc_transaction(htlc_tx, input, amount, per_commitment_point, htlc, secp_ctx).unwrap())
+       }
+
+       fn sign_closing_transaction(&self, closing_tx: &ClosingTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
+               closing_tx.verify(self.inner.funding_outpoint().into_bitcoin_outpoint())
+                       .expect("derived different closing transaction");
+               Ok(self.inner.sign_closing_transaction(closing_tx, secp_ctx).unwrap())
+       }
+
+       fn sign_holder_anchor_input(
+               &self, anchor_tx: &Transaction, input: usize, secp_ctx: &Secp256k1<secp256k1::All>,
+       ) -> Result<Signature, ()> {
+               debug_assert!(MIN_CHAN_DUST_LIMIT_SATOSHIS > ANCHOR_OUTPUT_VALUE_SATOSHI);
+               // As long as our minimum dust limit is enforced and is greater than our anchor output
+               // value, an anchor output can only have an index within [0, 1].
+               assert!(anchor_tx.input[input].previous_output.vout == 0 || anchor_tx.input[input].previous_output.vout == 1);
+               self.inner.sign_holder_anchor_input(anchor_tx, input, secp_ctx)
+       }
+
+       fn sign_channel_announcement_with_funding_key(
+               &self, msg: &msgs::UnsignedChannelAnnouncement, secp_ctx: &Secp256k1<secp256k1::All>
+       ) -> Result<Signature, ()> {
+               self.inner.sign_channel_announcement_with_funding_key(msg, secp_ctx)
+       }
+}
+
+impl WriteableEcdsaChannelSigner for TestChannelSigner {}
+
+impl Writeable for TestChannelSigner {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
+               // TestChannelSigner has two fields - `inner` ([`InMemorySigner`]) and `state`
+               // ([`EnforcementState`]). `inner` is serialized here and deserialized by
+               // [`SignerProvider::read_chan_signer`]. `state` is managed by [`SignerProvider`]
+               // and will be serialized as needed by the implementation of that trait.
+               self.inner.write(writer)?;
+               Ok(())
+       }
+}
+
+impl TestChannelSigner {
+       fn verify_counterparty_commitment_tx<'a, T: secp256k1::Signing + secp256k1::Verification>(&self, commitment_tx: &'a CommitmentTransaction, secp_ctx: &Secp256k1<T>) -> TrustedCommitmentTransaction<'a> {
+               commitment_tx.verify(&self.inner.get_channel_parameters().as_counterparty_broadcastable(),
+                                    self.inner.counterparty_pubkeys(), self.inner.pubkeys(), secp_ctx)
+                       .expect("derived different per-tx keys or built transaction")
+       }
+
+       fn verify_holder_commitment_tx<'a, T: secp256k1::Signing + secp256k1::Verification>(&self, commitment_tx: &'a CommitmentTransaction, secp_ctx: &Secp256k1<T>) -> TrustedCommitmentTransaction<'a> {
+               commitment_tx.verify(&self.inner.get_channel_parameters().as_holder_broadcastable(),
+                                    self.inner.pubkeys(), self.inner.counterparty_pubkeys(), secp_ctx)
+                       .expect("derived different per-tx keys or built transaction")
+       }
+}
+
+/// The state used by [`TestChannelSigner`] in order to enforce policy checks
+///
+/// This structure is maintained by KeysInterface since we may have multiple copies of
+/// the signer and they must coordinate their state.
+#[derive(Clone)]
+pub struct EnforcementState {
+       /// The last counterparty commitment number we signed, backwards counting
+       pub last_counterparty_commitment: u64,
+       /// The last counterparty commitment they revoked, backwards counting
+       pub last_counterparty_revoked_commitment: u64,
+       /// The last holder commitment number we revoked, backwards counting
+       pub last_holder_revoked_commitment: u64,
+       /// The last validated holder commitment number, backwards counting
+       pub last_holder_commitment: u64,
+}
+
+impl EnforcementState {
+       /// Enforcement state for a new channel
+       pub fn new() -> Self {
+               EnforcementState {
+                       last_counterparty_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER,
+                       last_counterparty_revoked_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER,
+                       last_holder_revoked_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER,
+                       last_holder_commitment: INITIAL_REVOKED_COMMITMENT_NUMBER,
+               }
+       }
+}
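
All four `EnforcementState` counters above start at `INITIAL_REVOKED_COMMITMENT_NUMBER` (1 << 48) and count downwards, which is the convention the assertions in `sign_counterparty_commitment` and the other policy checks depend on. A small self-contained sketch of that backwards-counting update rule; the constant and the `min` update mirror the file above, but nothing here is the crate's public API:

    use core::cmp;

    // Mirrors INITIAL_REVOKED_COMMITMENT_NUMBER from the file above.
    const INITIAL_REVOKED_COMMITMENT_NUMBER: u64 = 1 << 48;

    fn main() {
        // Commitment numbers count downwards from 2^48, so "the next commitment"
        // is the currently tracked value minus one.
        let mut last_counterparty_commitment = INITIAL_REVOKED_COMMITMENT_NUMBER;
        let actual_commitment_number = last_counterparty_commitment - 1;

        // The signer accepts either re-signing the same commitment or the next one.
        assert!(actual_commitment_number == last_counterparty_commitment
            || actual_commitment_number == last_counterparty_commitment - 1);

        // The tracked state always keeps the lowest (newest) commitment number seen.
        last_counterparty_commitment =
            cmp::min(last_counterparty_commitment, actual_commitment_number);
        assert_eq!(last_counterparty_commitment, (1 << 48) - 1);
    }
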
index bcd460ee1b11e1343add61ccee1bfb9c713726a1..7a9ce06910b975d8ead47607a5272e1dcf92d0c5 100644 (file)
@@ -20,6 +20,7 @@ use crate::chain::transaction::OutPoint;
 use crate::sign;
 use crate::events;
 use crate::events::bump_transaction::{WalletSource, Utxo};
+use crate::ln::ChannelId;
 use crate::ln::channelmanager;
 use crate::ln::chan_utils::CommitmentTransaction;
 use crate::ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
@@ -31,9 +32,10 @@ use crate::offers::invoice_request::UnsignedInvoiceRequest;
 use crate::routing::gossip::{EffectiveCapacity, NetworkGraph, NodeId};
 use crate::routing::utxo::{UtxoLookup, UtxoLookupError, UtxoResult};
 use crate::routing::router::{find_route, InFlightHtlcs, Path, Route, RouteParameters, Router, ScorerAccountingForInFlightHtlcs};
-use crate::routing::scoring::{ChannelUsage, Score};
+use crate::routing::scoring::{ChannelUsage, ScoreUpdate, ScoreLookUp};
+use crate::sync::RwLock;
 use crate::util::config::UserConfig;
-use crate::util::enforcing_trait_impls::{EnforcingSigner, EnforcementState};
+use crate::util::test_channel_signer::{TestChannelSigner, EnforcementState};
 use crate::util::logger::{Logger, Level, Record};
 use crate::util::ser::{Readable, ReadableArgs, Writer, Writeable};
 
@@ -59,7 +61,7 @@ use regex;
 use crate::io;
 use crate::prelude::*;
 use core::cell::RefCell;
-use core::ops::DerefMut;
+use core::ops::Deref;
 use core::time::Duration;
 use crate::sync::{Mutex, Arc};
 use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
@@ -100,11 +102,11 @@ impl chaininterface::FeeEstimator for TestFeeEstimator {
 pub struct TestRouter<'a> {
        pub network_graph: Arc<NetworkGraph<&'a TestLogger>>,
        pub next_routes: Mutex<VecDeque<(RouteParameters, Result<Route, LightningError>)>>,
-       pub scorer: &'a Mutex<TestScorer>,
+       pub scorer: &'a RwLock<TestScorer>,
 }
 
 impl<'a> TestRouter<'a> {
-       pub fn new(network_graph: Arc<NetworkGraph<&'a TestLogger>>, scorer: &'a Mutex<TestScorer>) -> Self {
+       pub fn new(network_graph: Arc<NetworkGraph<&'a TestLogger>>, scorer: &'a RwLock<TestScorer>) -> Self {
                Self { network_graph, next_routes: Mutex::new(VecDeque::new()), scorer }
        }
 
@@ -122,8 +124,8 @@ impl<'a> Router for TestRouter<'a> {
                if let Some((find_route_query, find_route_res)) = self.next_routes.lock().unwrap().pop_front() {
                        assert_eq!(find_route_query, *params);
                        if let Ok(ref route) = find_route_res {
-                               let mut binding = self.scorer.lock().unwrap();
-                               let scorer = ScorerAccountingForInFlightHtlcs::new(binding.deref_mut(), &inflight_htlcs);
+                               let scorer = self.scorer.read().unwrap();
+                               let scorer = ScorerAccountingForInFlightHtlcs::new(scorer, &inflight_htlcs);
                                for path in &route.paths {
                                        let mut aggregate_msat = 0u64;
                                        for (idx, hop) in path.hops.iter().rev().enumerate() {
@@ -150,7 +152,7 @@ impl<'a> Router for TestRouter<'a> {
                let logger = TestLogger::new();
                find_route(
                        payer, params, &self.network_graph, first_hops, &logger,
-                       &ScorerAccountingForInFlightHtlcs::new(self.scorer.lock().unwrap().deref_mut(), &inflight_htlcs), &(),
+                       &ScorerAccountingForInFlightHtlcs::new(self.scorer.read().unwrap(), &inflight_htlcs), &(),
                        &[42; 32]
                )
        }
@@ -173,7 +175,7 @@ impl EntropySource for OnlyReadsKeysInterface {
        fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }}
 
 impl SignerProvider for OnlyReadsKeysInterface {
-       type Signer = EnforcingSigner;
+       type Signer = TestChannelSigner;
 
        fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] { unreachable!(); }
 
@@ -183,7 +185,7 @@ impl SignerProvider for OnlyReadsKeysInterface {
                let inner: InMemorySigner = ReadableArgs::read(&mut reader, self)?;
                let state = Arc::new(Mutex::new(EnforcementState::new()));
 
-               Ok(EnforcingSigner::new_with_revoked(
+               Ok(TestChannelSigner::new_with_revoked(
                        inner,
                        state,
                        false
@@ -195,18 +197,18 @@ impl SignerProvider for OnlyReadsKeysInterface {
 }
 
 pub struct TestChainMonitor<'a> {
-       pub added_monitors: Mutex<Vec<(OutPoint, channelmonitor::ChannelMonitor<EnforcingSigner>)>>,
-       pub monitor_updates: Mutex<HashMap<[u8; 32], Vec<channelmonitor::ChannelMonitorUpdate>>>,
-       pub latest_monitor_update_id: Mutex<HashMap<[u8; 32], (OutPoint, u64, MonitorUpdateId)>>,
-       pub chain_monitor: chainmonitor::ChainMonitor<EnforcingSigner, &'a TestChainSource, &'a chaininterface::BroadcasterInterface, &'a TestFeeEstimator, &'a TestLogger, &'a chainmonitor::Persist<EnforcingSigner>>,
+       pub added_monitors: Mutex<Vec<(OutPoint, channelmonitor::ChannelMonitor<TestChannelSigner>)>>,
+       pub monitor_updates: Mutex<HashMap<ChannelId, Vec<channelmonitor::ChannelMonitorUpdate>>>,
+       pub latest_monitor_update_id: Mutex<HashMap<ChannelId, (OutPoint, u64, MonitorUpdateId)>>,
+       pub chain_monitor: chainmonitor::ChainMonitor<TestChannelSigner, &'a TestChainSource, &'a chaininterface::BroadcasterInterface, &'a TestFeeEstimator, &'a TestLogger, &'a chainmonitor::Persist<TestChannelSigner>>,
        pub keys_manager: &'a TestKeysInterface,
        /// If this is set to Some(), the next update_channel call (not watch_channel) must be a
        /// ChannelForceClosed event for the given channel_id with should_broadcast set to the given
        /// boolean.
-       pub expect_channel_force_closed: Mutex<Option<([u8; 32], bool)>>,
+       pub expect_channel_force_closed: Mutex<Option<(ChannelId, bool)>>,
 }
 impl<'a> TestChainMonitor<'a> {
-       pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a chainmonitor::Persist<EnforcingSigner>, keys_manager: &'a TestKeysInterface) -> Self {
+       pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a chainmonitor::Persist<TestChannelSigner>, keys_manager: &'a TestKeysInterface) -> Self {
                Self {
                        added_monitors: Mutex::new(Vec::new()),
                        monitor_updates: Mutex::new(HashMap::new()),
@@ -217,18 +219,18 @@ impl<'a> TestChainMonitor<'a> {
                }
        }
 
-       pub fn complete_sole_pending_chan_update(&self, channel_id: &[u8; 32]) {
+       pub fn complete_sole_pending_chan_update(&self, channel_id: &ChannelId) {
                let (outpoint, _, latest_update) = self.latest_monitor_update_id.lock().unwrap().get(channel_id).unwrap().clone();
                self.chain_monitor.channel_monitor_updated(outpoint, latest_update).unwrap();
        }
 }
-impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
-       fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingSigner>) -> chain::ChannelMonitorUpdateStatus {
+impl<'a> chain::Watch<TestChannelSigner> for TestChainMonitor<'a> {
+       fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<TestChannelSigner>) -> chain::ChannelMonitorUpdateStatus {
                // At every point where we get a monitor update, we should be able to send a useful monitor
                // to a watchtower and disk...
                let mut w = TestVecWriter(Vec::new());
                monitor.write(&mut w).unwrap();
-               let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
+               let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
                        &mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager)).unwrap().1;
                assert!(new_monitor == monitor);
                self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(),
@@ -262,7 +264,7 @@ impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
                let monitor = self.chain_monitor.get_monitor(funding_txo).unwrap();
                w.0.clear();
                monitor.write(&mut w).unwrap();
-               let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
+               let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
                        &mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager)).unwrap().1;
                assert!(new_monitor == *monitor);
                self.added_monitors.lock().unwrap().push((funding_txo, new_monitor));
@@ -977,16 +979,16 @@ impl NodeSigner for TestKeysInterface {
 }
 
 impl SignerProvider for TestKeysInterface {
-       type Signer = EnforcingSigner;
+       type Signer = TestChannelSigner;
 
        fn generate_channel_keys_id(&self, inbound: bool, channel_value_satoshis: u64, user_channel_id: u128) -> [u8; 32] {
                self.backing.generate_channel_keys_id(inbound, channel_value_satoshis, user_channel_id)
        }
 
-       fn derive_channel_signer(&self, channel_value_satoshis: u64, channel_keys_id: [u8; 32]) -> EnforcingSigner {
+       fn derive_channel_signer(&self, channel_value_satoshis: u64, channel_keys_id: [u8; 32]) -> TestChannelSigner {
                let keys = self.backing.derive_channel_signer(channel_value_satoshis, channel_keys_id);
                let state = self.make_enforcement_state_cell(keys.commitment_seed);
-               EnforcingSigner::new_with_revoked(keys, state, self.disable_revocation_policy_check)
+               TestChannelSigner::new_with_revoked(keys, state, self.disable_revocation_policy_check)
        }
 
        fn read_chan_signer(&self, buffer: &[u8]) -> Result<Self::Signer, msgs::DecodeError> {
@@ -995,7 +997,7 @@ impl SignerProvider for TestKeysInterface {
                let inner: InMemorySigner = ReadableArgs::read(&mut reader, self)?;
                let state = self.make_enforcement_state_cell(inner.commitment_seed);
 
-               Ok(EnforcingSigner::new_with_revoked(
+               Ok(TestChannelSigner::new_with_revoked(
                        inner,
                        state,
                        self.disable_revocation_policy_check
@@ -1036,10 +1038,10 @@ impl TestKeysInterface {
                self
        }
 
-       pub fn derive_channel_keys(&self, channel_value_satoshis: u64, id: &[u8; 32]) -> EnforcingSigner {
+       pub fn derive_channel_keys(&self, channel_value_satoshis: u64, id: &[u8; 32]) -> TestChannelSigner {
                let keys = self.backing.derive_channel_keys(channel_value_satoshis, id);
                let state = self.make_enforcement_state_cell(keys.commitment_seed);
-               EnforcingSigner::new_with_revoked(keys, state, self.disable_revocation_policy_check)
+               TestChannelSigner::new_with_revoked(keys, state, self.disable_revocation_policy_check)
        }
 
        fn make_enforcement_state_cell(&self, commitment_seed: [u8; 32]) -> Arc<Mutex<EnforcementState>> {
@@ -1160,7 +1162,7 @@ impl crate::util::ser::Writeable for TestScorer {
        fn write<W: crate::util::ser::Writer>(&self, _: &mut W) -> Result<(), crate::io::Error> { unreachable!(); }
 }
 
-impl Score for TestScorer {
+impl ScoreLookUp for TestScorer {
        type ScoreParams = ();
        fn channel_penalty_msat(
                &self, short_channel_id: u64, _source: &NodeId, _target: &NodeId, usage: ChannelUsage, _score_params: &Self::ScoreParams
@@ -1176,7 +1178,9 @@ impl Score for TestScorer {
                }
                0
        }
+}
 
+impl ScoreUpdate for TestScorer {
        fn payment_path_failed(&mut self, _actual_path: &Path, _actual_short_channel_id: u64) {}
 
        fn payment_path_successful(&mut self, _actual_path: &Path) {}
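
The hunks above track the upstream split of the old `Score` trait into `ScoreLookUp` (read-only penalty queries) and `ScoreUpdate` (mutating feedback), which is also why `TestRouter` now holds the scorer behind an `RwLock` instead of a `Mutex`: lookups only need a shared read guard. A toy sketch of that read/write split; the two traits below are local stand-ins with simplified signatures, not the crate's definitions:

    use std::sync::RwLock;

    // Local stand-ins for the read side and the write side of scoring.
    trait PenaltyLookUp {
        fn channel_penalty_msat(&self, short_channel_id: u64) -> u64;
    }
    trait PenaltyUpdate {
        fn payment_path_failed(&mut self, short_channel_id: u64);
    }

    #[derive(Default)]
    struct ToyScorer {
        failures: u64,
    }

    impl PenaltyLookUp for ToyScorer {
        fn channel_penalty_msat(&self, _short_channel_id: u64) -> u64 {
            // Reads never mutate, so many callers can hold read guards at once.
            self.failures * 100
        }
    }

    impl PenaltyUpdate for ToyScorer {
        fn payment_path_failed(&mut self, _short_channel_id: u64) {
            self.failures += 1;
        }
    }

    fn main() {
        let scorer = RwLock::new(ToyScorer::default());
        assert_eq!(scorer.read().unwrap().channel_penalty_msat(42), 0);
        scorer.write().unwrap().payment_path_failed(42);
        assert_eq!(scorer.read().unwrap().channel_penalty_msat(42), 100);
    }
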
diff --git a/pending_changelog/new_channel_id_type_pr_2485.txt b/pending_changelog/new_channel_id_type_pr_2485.txt
new file mode 100644 (file)
index 0000000..4ae3c2c
--- /dev/null
@@ -0,0 +1 @@
+* In several APIs, `channel_id` parameters have been changed from type `[u8; 32]` to the newly introduced `ChannelId` type from the `ln` namespace (`lightning::ln::ChannelId`) (PR #2485)
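
For downstream code affected by the entry above, migration is largely mechanical: wrap raw 32-byte arrays in the new type at API boundaries. The sketch below assumes `ChannelId` is a newtype over `[u8; 32]` exposing a `from_bytes` constructor, as suggested by the new `lightning/src/ln/channel_id.rs`; verify the exact constructor name against that file:

    use lightning::ln::ChannelId;

    // Hypothetical caller-side function that now takes the newtype.
    fn lookup_channel(channel_id: &ChannelId) {
        let _ = channel_id;
    }

    fn main() {
        // Previously this raw array was passed straight into channel_id parameters.
        let raw_id: [u8; 32] = [0x42; 32];

        // With PR #2485 it is wrapped in ChannelId first (constructor name assumed).
        let channel_id = ChannelId::from_bytes(raw_id);
        lookup_channel(&channel_id);
    }
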