Merge pull request #2634 from TheBlueMatt/2023-09-claimable-unwrap
author	Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Tue, 3 Oct 2023 17:12:08 +0000 (17:12 +0000)
committer	GitHub <noreply@github.com>
Tue, 3 Oct 2023 17:12:08 +0000 (17:12 +0000)
Avoid unwrap'ing `channel_parameters` in to_counterparty signing

17 files changed:
fuzz/src/chanmon_consistency.rs
lightning-background-processor/src/lib.rs
lightning-invoice/src/payment.rs
lightning/src/chain/chainmonitor.rs
lightning/src/chain/channelmonitor.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/msgs.rs
lightning/src/ln/outbound_payment.rs
lightning/src/ln/payment_tests.rs
lightning/src/offers/offer.rs
lightning/src/onion_message/messenger.rs
lightning/src/routing/router.rs
lightning/src/routing/scoring.rs
lightning/src/sign/mod.rs
lightning/src/util/persist.rs
lightning/src/util/ser.rs
lightning/src/util/test_utils.rs

diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index 6a2007165d71773cbb3d39ea5ea0dd2187aca6bb..2a1b9e9a70a2fff9f4026879b26249ee79d89357 100644
@@ -155,7 +155,7 @@ impl chain::Watch<TestChannelSigner> for TestChainMonitor {
                };
                let deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::
                        read(&mut Cursor::new(&map_entry.get().1), (&*self.keys, &*self.keys)).unwrap().1;
-               deserialized_monitor.update_monitor(update, &&TestBroadcaster{}, &FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }, &self.logger).unwrap();
+               deserialized_monitor.update_monitor(update, &&TestBroadcaster{}, &&FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }, &self.logger).unwrap();
                let mut ser = VecWriter(Vec::new());
                deserialized_monitor.write(&mut ser).unwrap();
                map_entry.insert((update.update_id, ser.0));
diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs
index efa1a42142c7c428405f9211019680e997aeaa57..fb20514efd160185745438b03d44f5faeedf7bd8 100644
@@ -501,7 +501,7 @@ use core::task;
 /// could setup `process_events_async` like this:
 /// ```
 /// # use lightning::io;
-/// # use std::sync::{Arc, Mutex};
+/// # use std::sync::{Arc, RwLock};
 /// # use std::sync::atomic::{AtomicBool, Ordering};
 /// # use lightning_background_processor::{process_events_async, GossipSync};
 /// # struct MyStore {}
@@ -532,7 +532,7 @@ use core::task;
 /// # type MyNetworkGraph = lightning::routing::gossip::NetworkGraph<Arc<MyLogger>>;
 /// # type MyGossipSync = lightning::routing::gossip::P2PGossipSync<Arc<MyNetworkGraph>, Arc<MyUtxoLookup>, Arc<MyLogger>>;
 /// # type MyChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager<MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyLogger>;
-/// # type MyScorer = Mutex<lightning::routing::scoring::ProbabilisticScorer<Arc<MyNetworkGraph>, Arc<MyLogger>>>;
+/// # type MyScorer = RwLock<lightning::routing::scoring::ProbabilisticScorer<Arc<MyNetworkGraph>, Arc<MyLogger>>>;
 ///
 /// # async fn setup_background_processing(my_persister: Arc<MyStore>, my_event_handler: Arc<MyEventHandler>, my_chain_monitor: Arc<MyChainMonitor>, my_channel_manager: Arc<MyChannelManager>, my_gossip_sync: Arc<MyGossipSync>, my_logger: Arc<MyLogger>, my_scorer: Arc<MyScorer>, my_peer_manager: Arc<MyPeerManager>) {
 ///    let background_persister = Arc::clone(&my_persister);
@@ -1181,7 +1181,7 @@ mod tests {
                        let network_graph = Arc::new(NetworkGraph::new(network, logger.clone()));
                        let scorer = Arc::new(Mutex::new(TestScorer::new()));
                        let seed = [i as u8; 32];
-                       let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone(), ()));
+                       let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone(), Default::default()));
                        let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Bitcoin));
                        let kv_store = Arc::new(FilesystemStore::new(format!("{}_persister_{}", &persist_dir, i).into()));
                        let now = Duration::from_secs(genesis_block.header.time as u64);
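
The documentation above swaps the scorer's wrapper from Mutex to RwLock so that concurrent route-finding can take shared read locks while score updates take the exclusive write lock. A minimal sketch of that locking pattern, using plain std types and a toy scorer rather than the LDK `ProbabilisticScorer`:

```rust
use std::sync::{Arc, RwLock};

// Toy stand-in for a ProbabilisticScorer-like type.
#[derive(Default)]
struct ToyScorer { penalty_msat: u64 }

fn main() {
    let scorer = Arc::new(RwLock::new(ToyScorer::default()));

    // Route-finding only needs shared read access, so many readers can proceed at once...
    {
        let read = scorer.read().unwrap();
        println!("base penalty: {} msat", read.penalty_msat);
    }

    // ...while recording payment or probe results takes the exclusive write lock.
    scorer.write().unwrap().penalty_msat += 500;
    assert_eq!(scorer.read().unwrap().penalty_msat, 500);
}
```
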
diff --git a/lightning-invoice/src/payment.rs b/lightning-invoice/src/payment.rs
index 08be3d54ba502a97d0ae36b80407a290c846c10d..0247913634a24f30cab3e8bb2a77ab3c49d7fdf1 100644
@@ -9,7 +9,8 @@
 
 //! Convenient utilities for paying Lightning invoices.
 
-use crate::{Bolt11Invoice, Vec};
+use crate::Bolt11Invoice;
+use crate::prelude::*;
 
 use bitcoin_hashes::Hash;
 
diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs
index 261c0471ca4089dfaa1d21ca0e8c8f810f90c7a5..e87d082d9a7bb9061894ce26fb15da0087a7c173 100644
@@ -324,7 +324,6 @@ where C::Target: chain::Filter,
                                if self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state).is_err() {
                                        // Take the monitors lock for writing so that we poison it and any future
                                        // operations going forward fail immediately.
-                                       core::mem::drop(monitor_state);
                                        core::mem::drop(monitor_lock);
                                        let _poison = self.monitors.write().unwrap();
                                        log_error!(self.logger, "{}", err_str);
@@ -767,7 +766,7 @@ where C::Target: chain::Filter,
                        Some(monitor_state) => {
                                let monitor = &monitor_state.monitor;
                                log_trace!(self.logger, "Updating ChannelMonitor for channel {}", log_funding_info!(monitor));
-                               let update_res = monitor.update_monitor(update, &self.broadcaster, &*self.fee_estimator, &self.logger);
+                               let update_res = monitor.update_monitor(update, &self.broadcaster, &self.fee_estimator, &self.logger);
 
                                let update_id = MonitorUpdateId::from_monitor_update(update);
                                let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
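
The error path above poisons the `monitors` lock on purpose: it drops the read guard, takes the write lock, and the surrounding code (not all shown here) then panics while that guard is held, which is what actually poisons the lock so future operations fail immediately. A self-contained sketch of that std behavior, with toy types rather than the chain monitor itself:

```rust
use std::sync::{Arc, RwLock};
use std::thread;

fn main() {
    let monitors: Arc<RwLock<Vec<u32>>> = Arc::new(RwLock::new(Vec::new()));

    // Panicking while holding the write guard poisons the lock, mirroring the
    // `let _poison = self.monitors.write().unwrap();` pattern above.
    let poisoner = Arc::clone(&monitors);
    let _ = thread::spawn(move || {
        let _guard = poisoner.write().unwrap();
        panic!("simulated irrecoverable monitor error");
    }).join();

    // Every subsequent attempt to take the lock now errors out immediately.
    assert!(monitors.read().is_err());
    assert!(monitors.write().is_err());
}
```
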
diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index a0ac1657e938baf766b7e9bc281370eac6fece12..bd0c15484283b5a12d0c815d13afb9310acb688e 100644
@@ -1311,7 +1311,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
                &self,
                updates: &ChannelMonitorUpdate,
                broadcaster: &B,
-               fee_estimator: F,
+               fee_estimator: &F,
                logger: &L,
        ) -> Result<(), ()>
        where
@@ -2615,7 +2615,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                self.pending_monitor_events.push(MonitorEvent::HolderForceClosed(self.funding_info.0));
        }
 
-       pub fn update_monitor<B: Deref, F: Deref, L: Deref>(&mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: F, logger: &L) -> Result<(), ()>
+       pub fn update_monitor<B: Deref, F: Deref, L: Deref>(&mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &L) -> Result<(), ()>
        where B::Target: BroadcasterInterface,
                F::Target: FeeEstimator,
                L::Target: Logger,
@@ -2655,7 +2655,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
                        panic!("Attempted to apply ChannelMonitorUpdates out of order, check the update_id before passing an update to update_monitor!");
                }
                let mut ret = Ok(());
-               let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&*fee_estimator);
+               let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&**fee_estimator);
                for update in updates.updates.iter() {
                        match update {
                                ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { commitment_tx, htlc_outputs, claimed_htlcs, nondust_htlc_sources } => {
@@ -4581,7 +4581,7 @@ mod tests {
 
                let broadcaster = TestBroadcaster::with_blocks(Arc::clone(&nodes[1].blocks));
                assert!(
-                       pre_update_monitor.update_monitor(&replay_update, &&broadcaster, &chanmon_cfgs[1].fee_estimator, &nodes[1].logger)
+                       pre_update_monitor.update_monitor(&replay_update, &&broadcaster, &&chanmon_cfgs[1].fee_estimator, &nodes[1].logger)
                        .is_err());
                // Even though we error'd on the first update, we should still have generated an HTLC claim
                // transaction
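
`update_monitor` now borrows its fee estimator (`&F` where `F: Deref`, dereferenced as `&**fee_estimator`) instead of taking it by value, which is why the test call sites above gain a second `&`. A toy sketch of why the double reference satisfies the new bound, using a stand-in trait rather than the real `FeeEstimator`:

```rust
use std::ops::Deref;

// Simplified stand-in for LDK's FeeEstimator trait.
trait FeeEstimator { fn est_sat_per_1000_weight(&self) -> u32; }

struct StaticEstimator;
impl FeeEstimator for StaticEstimator {
    fn est_sat_per_1000_weight(&self) -> u32 { 253 }
}

// Mirrors the updated shape: the estimator is borrowed rather than consumed.
fn update_monitor<F: Deref>(fee_estimator: &F) -> u32 where F::Target: FeeEstimator {
    // `&**fee_estimator` peels `&F`, then `F`, down to `&F::Target`, matching the
    // `LowerBoundedFeeEstimator::new(&**fee_estimator)` line in the hunk above.
    let inner: &F::Target = &**fee_estimator;
    inner.est_sat_per_1000_weight()
}

fn main() {
    let est = StaticEstimator;
    // `&est` plays the role of `F: Deref<Target = StaticEstimator>`, and the extra
    // borrow gives `&F`, hence the `&&` now seen at the call sites.
    assert_eq!(update_monitor(&&est), 253);
    assert_eq!(update_monitor(&&est), 253); // the same estimator can be reused freely
}
```
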
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 3f6a2a6b20dfcc692df81fad93ac93c4d37a068e..62c6741fbdf89f25595e2c855b7dbb2fdc2e609a 100644
@@ -803,7 +803,7 @@ pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
        Arc<DefaultRouter<
                Arc<NetworkGraph<Arc<L>>>,
                Arc<L>,
-               Arc<Mutex<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>,
+               Arc<RwLock<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>,
                ProbabilisticScoringFeeParameters,
                ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>,
        >>,
@@ -832,7 +832,7 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> =
                &'e DefaultRouter<
                        &'f NetworkGraph<&'g L>,
                        &'g L,
-                       &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>,
+                       &'h RwLock<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>,
                        ProbabilisticScoringFeeParameters,
                        ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>
                >,
@@ -840,6 +840,9 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> =
        >;
 
 /// A trivial trait which describes any [`ChannelManager`].
+///
+/// This is not exported to bindings users as general cover traits aren't useful in other
+/// languages.
 pub trait AChannelManager {
        /// A type implementing [`chain::Watch`].
        type Watch: chain::Watch<Self::Signer> + ?Sized;
diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs
index e4cc1f1955fe65ccacc62d9efc97c85ab94968af..a68ac35218594689cd36402b7b4c05df3fa7321c 100644
@@ -39,10 +39,12 @@ use crate::onion_message;
 use crate::sign::{NodeSigner, Recipient};
 
 use crate::prelude::*;
+#[cfg(feature = "std")]
 use core::convert::TryFrom;
 use core::fmt;
 use core::fmt::Debug;
 use core::ops::Deref;
+#[cfg(feature = "std")]
 use core::str::FromStr;
 use crate::io::{self, Cursor, Read};
 use crate::io_extras::read_to_end;
@@ -956,7 +958,10 @@ impl From<std::net::SocketAddr> for SocketAddress {
                }
 }
 
-fn parse_onion_address(host: &str, port: u16) -> Result<SocketAddress, SocketAddressParseError> {
+/// Parses an OnionV3 host and port into a [`SocketAddress::OnionV3`].
+///
+/// The host part must end with ".onion".
+pub fn parse_onion_address(host: &str, port: u16) -> Result<SocketAddress, SocketAddressParseError> {
        if host.ends_with(".onion") {
                let domain = &host[..host.len() - ".onion".len()];
                if domain.len() != 56 {
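
With `parse_onion_address` now public, callers can validate a Tor v3 host directly. A hedged usage sketch (default features assumed; the 56-character host below is a placeholder, not a real onion service, and only the checks visible in this hunk are relied on):

```rust
use lightning::ln::msgs::parse_onion_address;

fn main() {
    // A host without the ".onion" suffix is rejected outright.
    assert!(parse_onion_address("node.example.com", 9735).is_err());

    // A 56-character label plus ".onion" passes the checks shown above; whether it
    // fully parses depends on the rest of the function body.
    let host = format!("{}.onion", "a".repeat(56));
    println!("parsed: {}", parse_onion_address(&host, 9735).is_ok());
}
```
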
diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs
index 1642f28efc7b36dbfa7739bf886ebb58b9eb7c70..b806b138a59092721d10b8e9289d9f0d28df23a0 100644
@@ -594,10 +594,26 @@ impl RecipientOnionFields {
        /// Note that if this field is non-empty, it will contain strictly increasing TLVs, each
        /// represented by a `(u64, Vec<u8>)` for its type number and serialized value respectively.
        /// This is validated when setting this field using [`Self::with_custom_tlvs`].
+       #[cfg(not(c_bindings))]
        pub fn custom_tlvs(&self) -> &Vec<(u64, Vec<u8>)> {
                &self.custom_tlvs
        }
 
+       /// Gets the custom TLVs that will be sent or have been received.
+       ///
+       /// Custom TLVs allow sending extra application-specific data with a payment. They provide
+       /// additional flexibility on top of payment metadata, as while other implementations may
+       /// require `payment_metadata` to reflect metadata provided in an invoice, custom TLVs
+       /// do not have this restriction.
+       ///
+       /// Note that if this field is non-empty, it will contain strictly increasing TLVs, each
+       /// represented by a `(u64, Vec<u8>)` for its type number and serialized value respectively.
+       /// This is validated when setting this field using [`Self::with_custom_tlvs`].
+       #[cfg(c_bindings)]
+       pub fn custom_tlvs(&self) -> Vec<(u64, Vec<u8>)> {
+               self.custom_tlvs.clone()
+       }
+
        /// When we have received some HTLC(s) towards an MPP payment, as we receive further HTLC(s) we
        /// have to make sure that some fields match exactly across the parts. For those that aren't
        /// required to match, if they don't match we should remove them so as to not expose data
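
A hedged sketch of round-tripping custom TLVs through `RecipientOnionFields`, written so it works with either accessor variant above (the owned `Vec` under `c_bindings`, the borrowed one otherwise). `with_custom_tlvs` is the setter referenced in the docs; `spontaneous_empty` is assumed here as a convenient constructor, and the TLV type number and payload are arbitrary:

```rust
use lightning::ln::channelmanager::RecipientOnionFields;

fn main() {
    // Arbitrary high-numbered TLV type with a small payload, purely for illustration.
    let tlvs: Vec<(u64, Vec<u8>)> = vec![(5482373483, vec![1, 2, 3, 4])];

    let onion_fields = RecipientOnionFields::spontaneous_empty()
        .with_custom_tlvs(tlvs.clone())
        .expect("a strictly increasing TLV list should be accepted");

    // Indexing down to a slice sidesteps the owned-vs-borrowed return type split,
    // just as the updated payment test does.
    assert_eq!(&onion_fields.custom_tlvs()[..], &tlvs[..]);
}
```
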
diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
index 72176760243bf25cae3dd3adfcb656a4be6417cd..26ecbb0bd249d116cf8e54f440145c60d1796bbd 100644
@@ -3781,7 +3781,7 @@ fn test_retry_custom_tlvs() {
                payment_hash, Some(payment_secret), events.pop().unwrap(), true, None).unwrap();
        match payment_claimable {
                Event::PaymentClaimable { onion_fields, .. } => {
-                       assert_eq!(onion_fields.unwrap().custom_tlvs(), &custom_tlvs);
+                       assert_eq!(&onion_fields.unwrap().custom_tlvs()[..], &custom_tlvs[..]);
                },
                _ => panic!("Unexpected event"),
        };
diff --git a/lightning/src/offers/offer.rs b/lightning/src/offers/offer.rs
index e0bc63e8b2b8109e0b9243a1a32ecfb0a5a3d073..60621b9dc7491686fbf3207a532f0e2bc25a5aa5 100644
@@ -366,7 +366,7 @@ macro_rules! offer_accessors { ($self: ident, $contents: expr) => {
        /// The chains that may be used when paying a requested invoice (e.g., bitcoin mainnet).
        /// Payments must be denominated in units of the minimal lightning-payable unit (e.g., msats)
        /// for the selected chain.
-       pub fn chains(&$self) -> Vec<$crate::bitcoin::blockdata::constants::ChainHash> {
+       pub fn chains(&$self) -> Vec<bitcoin::blockdata::constants::ChainHash> {
                $contents.chains()
        }
 
@@ -418,7 +418,7 @@ macro_rules! offer_accessors { ($self: ident, $contents: expr) => {
        }
 
        /// The public key used by the recipient to sign invoices.
-       pub fn signing_pubkey(&$self) -> $crate::bitcoin::secp256k1::PublicKey {
+       pub fn signing_pubkey(&$self) -> bitcoin::secp256k1::PublicKey {
                $contents.signing_pubkey()
        }
 } }
diff --git a/lightning/src/onion_message/messenger.rs b/lightning/src/onion_message/messenger.rs
index 2a9c830d38bec8e7a56e6e4cb98c6389f6cce60b..03dc6a5b002f01be6e706396190f1eddeb84f839 100644
@@ -246,6 +246,64 @@ pub trait CustomOnionMessageHandler {
        fn read_custom_message<R: io::Read>(&self, message_type: u64, buffer: &mut R) -> Result<Option<Self::CustomMessage>, msgs::DecodeError>;
 }
 
+
+/// Create an onion message with contents `message` to the destination of `path`.
+/// Returns (introduction_node_id, onion_msg)
+pub fn create_onion_message<ES: Deref, NS: Deref, T: CustomOnionMessageContents>(
+       entropy_source: &ES, node_signer: &NS, secp_ctx: &Secp256k1<secp256k1::All>,
+       path: OnionMessagePath, message: OnionMessageContents<T>, reply_path: Option<BlindedPath>,
+) -> Result<(PublicKey, msgs::OnionMessage), SendError>
+where
+       ES::Target: EntropySource,
+       NS::Target: NodeSigner,
+{
+       let OnionMessagePath { intermediate_nodes, mut destination } = path;
+       if let Destination::BlindedPath(BlindedPath { ref blinded_hops, .. }) = destination {
+               if blinded_hops.len() < 2 {
+                       return Err(SendError::TooFewBlindedHops);
+               }
+       }
+
+       if message.tlv_type() < 64 { return Err(SendError::InvalidMessage) }
+
+       // If we are sending straight to a blinded path and we are the introduction node, we need to
+       // advance the blinded path by 1 hop so the second hop is the new introduction node.
+       if intermediate_nodes.len() == 0 {
+               if let Destination::BlindedPath(ref mut blinded_path) = destination {
+                       let our_node_id = node_signer.get_node_id(Recipient::Node)
+                               .map_err(|()| SendError::GetNodeIdFailed)?;
+                       if blinded_path.introduction_node_id == our_node_id {
+                               advance_path_by_one(blinded_path, node_signer, &secp_ctx)
+                                       .map_err(|()| SendError::BlindedPathAdvanceFailed)?;
+                       }
+               }
+       }
+
+       let blinding_secret_bytes = entropy_source.get_secure_random_bytes();
+       let blinding_secret = SecretKey::from_slice(&blinding_secret_bytes[..]).expect("RNG is busted");
+       let (introduction_node_id, blinding_point) = if intermediate_nodes.len() != 0 {
+               (intermediate_nodes[0], PublicKey::from_secret_key(&secp_ctx, &blinding_secret))
+       } else {
+               match destination {
+                       Destination::Node(pk) => (pk, PublicKey::from_secret_key(&secp_ctx, &blinding_secret)),
+                       Destination::BlindedPath(BlindedPath { introduction_node_id, blinding_point, .. }) =>
+                               (introduction_node_id, blinding_point),
+               }
+       };
+       let (packet_payloads, packet_keys) = packet_payloads_and_keys(
+               &secp_ctx, &intermediate_nodes, destination, message, reply_path, &blinding_secret)
+               .map_err(|e| SendError::Secp256k1(e))?;
+
+       let prng_seed = entropy_source.get_secure_random_bytes();
+       let onion_routing_packet = construct_onion_message_packet(
+               packet_payloads, packet_keys, prng_seed).map_err(|()| SendError::TooBigPacket)?;
+
+       Ok((introduction_node_id, msgs::OnionMessage {
+               blinding_point,
+               onion_routing_packet
+       }))
+}
+
 impl<ES: Deref, NS: Deref, L: Deref, MR: Deref, OMH: Deref, CMH: Deref>
 OnionMessenger<ES, NS, L, MR, OMH, CMH>
 where
@@ -283,13 +341,9 @@ where
                &self, path: OnionMessagePath, message: OnionMessageContents<T>,
                reply_path: Option<BlindedPath>
        ) -> Result<(), SendError> {
-               let (introduction_node_id, onion_msg) = Self::create_onion_message(
-                       &self.entropy_source, 
-                       &self.node_signer, 
-                       &self.secp_ctx, 
-                       path, 
-                       message, 
-                       reply_path
+               let (introduction_node_id, onion_msg) = create_onion_message(
+                       &self.entropy_source, &self.node_signer, &self.secp_ctx,
+                       path, message, reply_path
                )?;
 
                let mut pending_per_peer_msgs = self.pending_messages.lock().unwrap();
@@ -303,63 +357,6 @@ where
                }
        }
 
-       /// Create an onion message with contents `message` to the destination of `path`.
-       /// Returns (introduction_node_id, onion_msg)
-       pub fn create_onion_message<T: CustomOnionMessageContents>(
-               entropy_source: &ES, 
-               node_signer: &NS,
-               secp_ctx: &Secp256k1<secp256k1::All>,
-               path: OnionMessagePath, 
-               message: OnionMessageContents<T>,
-               reply_path: Option<BlindedPath>, 
-       ) -> Result<(PublicKey, msgs::OnionMessage), SendError> {
-               let OnionMessagePath { intermediate_nodes, mut destination } = path;
-               if let Destination::BlindedPath(BlindedPath { ref blinded_hops, .. }) = destination {
-                       if blinded_hops.len() < 2 {
-                               return Err(SendError::TooFewBlindedHops);
-                       }
-               }
-
-               if message.tlv_type() < 64 { return Err(SendError::InvalidMessage) }
-
-               // If we are sending straight to a blinded path and we are the introduction node, we need to
-               // advance the blinded path by 1 hop so the second hop is the new introduction node.
-               if intermediate_nodes.len() == 0 {
-                       if let Destination::BlindedPath(ref mut blinded_path) = destination {
-                               let our_node_id = node_signer.get_node_id(Recipient::Node)
-                                       .map_err(|()| SendError::GetNodeIdFailed)?;
-                               if blinded_path.introduction_node_id == our_node_id {
-                                       advance_path_by_one(blinded_path, node_signer, &secp_ctx)
-                                               .map_err(|()| SendError::BlindedPathAdvanceFailed)?;
-                               }
-                       }
-               }
-
-               let blinding_secret_bytes = entropy_source.get_secure_random_bytes();
-               let blinding_secret = SecretKey::from_slice(&blinding_secret_bytes[..]).expect("RNG is busted");
-               let (introduction_node_id, blinding_point) = if intermediate_nodes.len() != 0 {
-                       (intermediate_nodes[0], PublicKey::from_secret_key(&secp_ctx, &blinding_secret))
-               } else {
-                       match destination {
-                               Destination::Node(pk) => (pk, PublicKey::from_secret_key(&secp_ctx, &blinding_secret)),
-                               Destination::BlindedPath(BlindedPath { introduction_node_id, blinding_point, .. }) =>
-                                       (introduction_node_id, blinding_point),
-                       }
-               };
-               let (packet_payloads, packet_keys) = packet_payloads_and_keys(
-                       &secp_ctx, &intermediate_nodes, destination, message, reply_path, &blinding_secret)
-                       .map_err(|e| SendError::Secp256k1(e))?;
-
-               let prng_seed = entropy_source.get_secure_random_bytes();
-               let onion_routing_packet = construct_onion_message_packet(
-                       packet_payloads, packet_keys, prng_seed).map_err(|()| SendError::TooBigPacket)?;
-
-               Ok((introduction_node_id, msgs::OnionMessage {
-                       blinding_point,
-                       onion_routing_packet
-               }))
-       }
-
        fn respond_with_onion_message<T: CustomOnionMessageContents>(
                &self, response: OnionMessageContents<T>, path_id: Option<[u8; 32]>,
                reply_path: Option<BlindedPath>
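
The refactor above pulls `create_onion_message` out of the `OnionMessenger` impl into a free function, so it can be called with just an entropy source and node signer while the messenger's send path merely delegates to it. A toy illustration of that shape, with plain stand-in types rather than the LDK ones:

```rust
// Stand-ins for the entropy source, node signer, and messenger used above.
struct EntropySource;
struct NodeSigner;
struct Messenger { entropy: EntropySource, signer: NodeSigner /* plus queues, logger, ... */ }

// Free function: callable by anyone holding the two dependencies it actually needs...
fn create_message(_entropy: &EntropySource, _signer: &NodeSigner, payload: &str) -> String {
    format!("onion({payload})")
}

impl Messenger {
    // ...while the messenger's own send path just delegates, as in the diff.
    fn send(&self, payload: &str) -> String {
        create_message(&self.entropy, &self.signer, payload)
    }
}

fn main() {
    let m = Messenger { entropy: EntropySource, signer: NodeSigner };
    assert_eq!(m.send("ping"), create_message(&EntropySource, &NodeSigner, "ping"));
}
```
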
diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs
index 1297322fbd49d6c0a16582e2b06be5858bfff6f2..28d452c56b616afde8e1aea886e5a0bfe65f8eb0 100644
@@ -112,12 +112,12 @@ pub trait Router {
 /// [`find_route`].
 ///
 /// [`ScoreLookUp`]: crate::routing::scoring::ScoreLookUp
-pub struct ScorerAccountingForInFlightHtlcs<'a, SP: Sized, Sc: 'a + ScoreLookUp<ScoreParams = SP>, S: Deref<Target = Sc>> {
+pub struct ScorerAccountingForInFlightHtlcs<'a, S: Deref> where S::Target: ScoreLookUp {
        scorer: S,
        // Maps a channel's short channel id and its direction to the liquidity used up.
        inflight_htlcs: &'a InFlightHtlcs,
 }
-impl<'a, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>, S: Deref<Target = Sc>> ScorerAccountingForInFlightHtlcs<'a, SP, Sc, S> {
+impl<'a, S: Deref> ScorerAccountingForInFlightHtlcs<'a, S> where S::Target: ScoreLookUp {
        /// Initialize a new `ScorerAccountingForInFlightHtlcs`.
        pub fn new(scorer: S, inflight_htlcs: &'a InFlightHtlcs) -> Self {
                ScorerAccountingForInFlightHtlcs {
@@ -127,13 +127,8 @@ impl<'a, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>, S: Deref<Target = Sc>> Sc
        }
 }
 
-#[cfg(c_bindings)]
-impl<'a, SP: Sized, Sc: ScoreLookUp<ScoreParams = SP>, S: Deref<Target = Sc>> Writeable for ScorerAccountingForInFlightHtlcs<'a, SP, Sc, S> {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> { self.scorer.write(writer) }
-}
-
-impl<'a, SP: Sized, Sc: 'a + ScoreLookUp<ScoreParams = SP>, S: Deref<Target = Sc>> ScoreLookUp for ScorerAccountingForInFlightHtlcs<'a, SP, Sc, S> {
-       type ScoreParams = Sc::ScoreParams;
+impl<'a, S: Deref> ScoreLookUp for ScorerAccountingForInFlightHtlcs<'a, S> where S::Target: ScoreLookUp {
+       type ScoreParams = <S::Target as ScoreLookUp>::ScoreParams;
        fn channel_penalty_msat(&self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams) -> u64 {
                if let Some(used_liquidity) = self.inflight_htlcs.used_liquidity_msat(
                        source, target, short_channel_id
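
`ScorerAccountingForInFlightHtlcs` drops its separate `SP`/`Sc` type parameters in favor of a single `S: Deref` with a `where S::Target: ScoreLookUp` bound. A toy version of that generics style, showing it still accepts anything that derefs to the right trait:

```rust
use std::ops::Deref;

trait Lookup { fn penalty(&self) -> u64; }

struct Fixed(u64);
impl Lookup for Fixed { fn penalty(&self) -> u64 { self.0 } }

// One generic parameter plus a where-clause replaces the old SP/Sc/S trio.
struct Wrapper<S: Deref> where S::Target: Lookup { scorer: S }

impl<S: Deref> Wrapper<S> where S::Target: Lookup {
    fn penalty(&self) -> u64 { self.scorer.penalty() }
}

fn main() {
    // Plain references, Boxes, Arcs: anything that derefs to a Lookup still works.
    assert_eq!(Wrapper { scorer: &Fixed(7) }.penalty(), 7);
    assert_eq!(Wrapper { scorer: Box::new(Fixed(9)) }.penalty(), 9);
}
```
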
diff --git a/lightning/src/routing/scoring.rs b/lightning/src/routing/scoring.rs
index c790f5df5c360b8873198703a4493d55ff664443..207e1d69bd98d2b66974884ab1226f1b36562a73 100644
@@ -89,7 +89,7 @@ macro_rules! define_score { ($($supertrait: path)*) => {
 /// `ScoreLookUp` is used to determine the penalty for a given channel.
 ///
 /// Scoring is in terms of fees willing to be paid in order to avoid routing through a channel.
-pub trait ScoreLookUp $(: $supertrait)* {
+pub trait ScoreLookUp {
        /// A configurable type which should contain various passed-in parameters for configuring the scorer,
        /// on a per-routefinding-call basis through to the scorer methods,
        /// which are used to determine the parameters for the suitability of channels for use.
@@ -108,7 +108,7 @@ pub trait ScoreLookUp $(: $supertrait)* {
 }
 
 /// `ScoreUpdate` is used to update the scorer's internal state after a payment attempt.
-pub trait ScoreUpdate $(: $supertrait)* {
+pub trait ScoreUpdate {
        /// Handles updating channel penalties after failing to route through a channel.
        fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64);
 
@@ -122,8 +122,20 @@ pub trait ScoreUpdate $(: $supertrait)* {
        fn probe_successful(&mut self, path: &Path);
 }
 
-impl<SP: Sized, S: ScoreLookUp<ScoreParams = SP>, T: Deref<Target=S> $(+ $supertrait)*> ScoreLookUp for T {
-       type ScoreParams = SP;
+/// A trait which can both lookup and update routing channel penalty scores.
+///
+/// This is used in places where both bounds are required and implemented for all types which
+/// implement [`ScoreLookUp`] and [`ScoreUpdate`].
+///
+/// Bindings users may need to manually implement this for their custom scoring implementations.
+pub trait Score : ScoreLookUp + ScoreUpdate $(+ $supertrait)* {}
+
+#[cfg(not(c_bindings))]
+impl<T: ScoreLookUp + ScoreUpdate $(+ $supertrait)*> Score for T {}
+
+#[cfg(not(c_bindings))]
+impl<S: ScoreLookUp, T: Deref<Target=S>> ScoreLookUp for T {
+       type ScoreParams = S::ScoreParams;
        fn channel_penalty_msat(
                &self, short_channel_id: u64, source: &NodeId, target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams
        ) -> u64 {
@@ -131,7 +143,8 @@ impl<SP: Sized, S: ScoreLookUp<ScoreParams = SP>, T: Deref<Target=S> $(+ $supert
        }
 }
 
-impl<S: ScoreUpdate, T: DerefMut<Target=S> $(+ $supertrait)*> ScoreUpdate for T {
+#[cfg(not(c_bindings))]
+impl<S: ScoreUpdate, T: DerefMut<Target=S>> ScoreUpdate for T {
        fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64) {
                self.deref_mut().payment_path_failed(path, short_channel_id)
        }
@@ -192,7 +205,7 @@ pub trait WriteableScore<'a>: LockableScore<'a> + Writeable {}
 #[cfg(not(c_bindings))]
 impl<'a, T> WriteableScore<'a> for T where T: LockableScore<'a> + Writeable {}
 #[cfg(not(c_bindings))]
-impl<'a, T: 'a + ScoreLookUp + ScoreUpdate> LockableScore<'a> for Mutex<T> {
+impl<'a, T: Score + 'a> LockableScore<'a> for Mutex<T> {
        type ScoreUpdate = T;
        type ScoreLookUp = T;
 
@@ -209,7 +222,7 @@ impl<'a, T: 'a + ScoreLookUp + ScoreUpdate> LockableScore<'a> for Mutex<T> {
 }
 
 #[cfg(not(c_bindings))]
-impl<'a, T: 'a + ScoreUpdate + ScoreLookUp> LockableScore<'a> for RefCell<T> {
+impl<'a, T: Score + 'a> LockableScore<'a> for RefCell<T> {
        type ScoreUpdate = T;
        type ScoreLookUp = T;
 
@@ -226,7 +239,7 @@ impl<'a, T: 'a + ScoreUpdate + ScoreLookUp> LockableScore<'a> for RefCell<T> {
 }
 
 #[cfg(not(c_bindings))]
-impl<'a, SP:Sized,  T: 'a + ScoreUpdate + ScoreLookUp<ScoreParams = SP>> LockableScore<'a> for RwLock<T> {
+impl<'a, T: Score + 'a> LockableScore<'a> for RwLock<T> {
        type ScoreUpdate = T;
        type ScoreLookUp = T;
 
@@ -244,12 +257,12 @@ impl<'a, SP:Sized,  T: 'a + ScoreUpdate + ScoreLookUp<ScoreParams = SP>> Lockabl
 
 #[cfg(c_bindings)]
 /// A concrete implementation of [`LockableScore`] which supports multi-threading.
-pub struct MultiThreadedLockableScore<T: ScoreLookUp + ScoreUpdate> {
+pub struct MultiThreadedLockableScore<T: Score> {
        score: RwLock<T>,
 }
 
 #[cfg(c_bindings)]
-impl<'a, SP:Sized, T: 'a + ScoreLookUp<ScoreParams = SP> + ScoreUpdate> LockableScore<'a> for MultiThreadedLockableScore<T> {
+impl<'a, T: Score + 'a> LockableScore<'a> for MultiThreadedLockableScore<T> {
        type ScoreUpdate = T;
        type ScoreLookUp = T;
        type WriteLocked = MultiThreadedScoreLockWrite<'a, Self::ScoreUpdate>;
@@ -265,17 +278,17 @@ impl<'a, SP:Sized, T: 'a + ScoreLookUp<ScoreParams = SP> + ScoreUpdate> Lockable
 }
 
 #[cfg(c_bindings)]
-impl<T: ScoreUpdate + ScoreLookUp> Writeable for MultiThreadedLockableScore<T> {
+impl<T: Score> Writeable for MultiThreadedLockableScore<T> {
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                self.score.read().unwrap().write(writer)
        }
 }
 
 #[cfg(c_bindings)]
-impl<'a, T: 'a + ScoreUpdate + ScoreLookUp> WriteableScore<'a> for MultiThreadedLockableScore<T> {}
+impl<'a, T: Score + 'a> WriteableScore<'a> for MultiThreadedLockableScore<T> {}
 
 #[cfg(c_bindings)]
-impl<T: ScoreLookUp + ScoreUpdate> MultiThreadedLockableScore<T> {
+impl<T: Score> MultiThreadedLockableScore<T> {
        /// Creates a new [`MultiThreadedLockableScore`] given an underlying [`Score`].
        pub fn new(score: T) -> Self {
                MultiThreadedLockableScore { score: RwLock::new(score) }
@@ -284,14 +297,14 @@ impl<T: ScoreLookUp + ScoreUpdate> MultiThreadedLockableScore<T> {
 
 #[cfg(c_bindings)]
 /// A locked `MultiThreadedLockableScore`.
-pub struct MultiThreadedScoreLockRead<'a, T: ScoreLookUp>(RwLockReadGuard<'a, T>);
+pub struct MultiThreadedScoreLockRead<'a, T: Score>(RwLockReadGuard<'a, T>);
 
 #[cfg(c_bindings)]
 /// A locked `MultiThreadedLockableScore`.
-pub struct MultiThreadedScoreLockWrite<'a, T: ScoreUpdate>(RwLockWriteGuard<'a, T>);
+pub struct MultiThreadedScoreLockWrite<'a, T: Score>(RwLockWriteGuard<'a, T>);
 
 #[cfg(c_bindings)]
-impl<'a, T: 'a + ScoreLookUp> Deref for MultiThreadedScoreLockRead<'a, T> {
+impl<'a, T: 'a + Score> Deref for MultiThreadedScoreLockRead<'a, T> {
        type Target = T;
 
        fn deref(&self) -> &Self::Target {
@@ -300,14 +313,24 @@ impl<'a, T: 'a + ScoreLookUp> Deref for MultiThreadedScoreLockRead<'a, T> {
 }
 
 #[cfg(c_bindings)]
-impl<'a, T: 'a + ScoreUpdate> Writeable for MultiThreadedScoreLockWrite<'a, T> {
+impl<'a, T: Score> ScoreLookUp for MultiThreadedScoreLockRead<'a, T> {
+       type ScoreParams = T::ScoreParams;
+       fn channel_penalty_msat(&self, short_channel_id: u64, source: &NodeId,
+               target: &NodeId, usage: ChannelUsage, score_params: &Self::ScoreParams
+       ) -> u64 {
+               self.0.channel_penalty_msat(short_channel_id, source, target, usage, score_params)
+       }
+}
+
+#[cfg(c_bindings)]
+impl<'a, T: Score> Writeable for MultiThreadedScoreLockWrite<'a, T> {
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                self.0.write(writer)
        }
 }
 
 #[cfg(c_bindings)]
-impl<'a, T: 'a + ScoreUpdate> Deref for MultiThreadedScoreLockWrite<'a, T> {
+impl<'a, T: 'a + Score> Deref for MultiThreadedScoreLockWrite<'a, T> {
        type Target = T;
 
        fn deref(&self) -> &Self::Target {
@@ -316,12 +339,31 @@ impl<'a, T: 'a + ScoreUpdate> Deref for MultiThreadedScoreLockWrite<'a, T> {
 }
 
 #[cfg(c_bindings)]
-impl<'a, T: 'a + ScoreUpdate> DerefMut for MultiThreadedScoreLockWrite<'a, T> {
+impl<'a, T: 'a + Score> DerefMut for MultiThreadedScoreLockWrite<'a, T> {
        fn deref_mut(&mut self) -> &mut Self::Target {
                self.0.deref_mut()
        }
 }
 
+#[cfg(c_bindings)]
+impl<'a, T: Score> ScoreUpdate for MultiThreadedScoreLockWrite<'a, T> {
+       fn payment_path_failed(&mut self, path: &Path, short_channel_id: u64) {
+               self.0.payment_path_failed(path, short_channel_id)
+       }
+
+       fn payment_path_successful(&mut self, path: &Path) {
+               self.0.payment_path_successful(path)
+       }
+
+       fn probe_failed(&mut self, path: &Path, short_channel_id: u64) {
+               self.0.probe_failed(path, short_channel_id)
+       }
+
+       fn probe_successful(&mut self, path: &Path) {
+               self.0.probe_successful(path)
+       }
+}
+
 
 /// Proposed use of a channel passed as a parameter to [`ScoreLookUp::channel_penalty_msat`].
 #[derive(Clone, Copy, Debug, PartialEq)]
@@ -1417,6 +1459,10 @@ impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> ScoreUpdate for Prob
        }
 }
 
+#[cfg(c_bindings)]
+impl<G: Deref<Target = NetworkGraph<L>>, L: Deref, T: Time> Score for ProbabilisticScorerUsingTime<G, L, T>
+where L::Target: Logger {}
+
 mod approx {
        const BITS: u32 = 64;
        const HIGHEST_BIT: u32 = BITS - 1;
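
With the new `Score` cover trait, a custom scorer only needs `ScoreLookUp` and `ScoreUpdate`; outside `c_bindings` the blanket impl above makes it a `Score` automatically, while bindings builds would add an explicit `impl Score`. A hedged sketch of a minimal custom scorer against those trait shapes:

```rust
use lightning::routing::gossip::NodeId;
use lightning::routing::router::Path;
use lightning::routing::scoring::{ChannelUsage, ScoreLookUp, ScoreUpdate};

#[derive(Default)]
struct FlatScorer { failures: u64 }

impl ScoreLookUp for FlatScorer {
    // This toy scorer takes no per-call tuning parameters.
    type ScoreParams = ();
    fn channel_penalty_msat(
        &self, _short_channel_id: u64, _source: &NodeId, _target: &NodeId,
        _usage: ChannelUsage, _score_params: &Self::ScoreParams,
    ) -> u64 {
        // Penalize every channel a little more for each recorded failure.
        500 + self.failures * 100
    }
}

impl ScoreUpdate for FlatScorer {
    fn payment_path_failed(&mut self, _path: &Path, _short_channel_id: u64) { self.failures += 1; }
    fn payment_path_successful(&mut self, _path: &Path) {}
    fn probe_failed(&mut self, _path: &Path, _short_channel_id: u64) { self.failures += 1; }
    fn probe_successful(&mut self, _path: &Path) {}
}

fn main() {
    // Thanks to the blanket impl, this can be handed to anything expecting a `Score`,
    // e.g. wrapped in a Mutex or RwLock via the LockableScore impls above.
    let _scorer = std::sync::RwLock::new(FlatScorer::default());
}
```
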
diff --git a/lightning/src/sign/mod.rs b/lightning/src/sign/mod.rs
index 5a5ac4968021f7bc5161e665db41d8c0b00e9091..04c4446e2c0c8c75bd4f2f7e3b415b8f35296cf2 100644
@@ -275,6 +275,9 @@ impl SpendableOutputDescriptor {
        ///
        /// Note that this does not include any signatures, just the information required to
        /// construct the transaction and sign it.
+       ///
+       /// This is not exported to bindings users as there is no standard serialization for an input.
+       /// See [`Self::create_spendable_outputs_psbt`] instead.
        pub fn to_psbt_input(&self) -> bitcoin::psbt::Input {
                match self {
                        SpendableOutputDescriptor::StaticOutput { output, .. } => {
diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs
index 2a022c37cc4a476e3bd3c2ecac076b53f25e9d87..35e473c42a690f47a216b87d344ab6a3ace87999 100644
@@ -397,11 +397,7 @@ where
        pub fn new(
                kv_store: K, logger: L, maximum_pending_updates: u64, entropy_source: ES,
                signer_provider: SP,
-       ) -> Self
-       where
-               ES::Target: EntropySource + Sized,
-               SP::Target: SignerProvider + Sized,
-       {
+       ) -> Self {
                MonitorUpdatingPersister {
                        kv_store,
                        logger,
@@ -416,12 +412,10 @@ where
        /// It is extremely important that your [`KVStore::read`] implementation uses the
        /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the
        /// documentation for [`MonitorUpdatingPersister`].
-       pub fn read_all_channel_monitors_with_updates<B: Deref, F: Deref + Clone>(
-               &self, broadcaster: B, fee_estimator: F,
+       pub fn read_all_channel_monitors_with_updates<B: Deref, F: Deref>(
+               &self, broadcaster: &B, fee_estimator: &F,
        ) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>, io::Error>
        where
-               ES::Target: EntropySource + Sized,
-               SP::Target: SignerProvider + Sized,
                B::Target: BroadcasterInterface,
                F::Target: FeeEstimator,
        {
@@ -432,8 +426,8 @@ where
                let mut res = Vec::with_capacity(monitor_list.len());
                for monitor_key in monitor_list {
                        res.push(self.read_channel_monitor_with_updates(
-                               &broadcaster,
-                               fee_estimator.clone(),
+                               broadcaster,
+                               fee_estimator,
                                monitor_key,
                        )?)
                }
@@ -457,12 +451,10 @@ where
        /// 
        /// Loading a large number of monitors will be faster if done in parallel. You can use this
        /// function to accomplish this. Take care to limit the number of parallel readers.
-       pub fn read_channel_monitor_with_updates<B: Deref, F: Deref + Clone>(
-               &self, broadcaster: &B, fee_estimator: F, monitor_key: String,
+       pub fn read_channel_monitor_with_updates<B: Deref, F: Deref>(
+               &self, broadcaster: &B, fee_estimator: &F, monitor_key: String,
        ) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), io::Error>
        where
-               ES::Target: EntropySource + Sized,
-               SP::Target: SignerProvider + Sized,
                B::Target: BroadcasterInterface,
                F::Target: FeeEstimator,
        {
@@ -484,7 +476,7 @@ where
                                Err(err) => return Err(err),
                        };
 
-                       monitor.update_monitor(&update, broadcaster, fee_estimator.clone(), &self.logger)
+                       monitor.update_monitor(&update, broadcaster, fee_estimator, &self.logger)
                                .map_err(|e| {
                                        log_error!(
                                                self.logger,
@@ -949,17 +941,17 @@ mod tests {
                // Check that the persisted channel data is empty before any channels are
                // open.
                let mut persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates(
-                       broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
+                       &broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
                assert_eq!(persisted_chan_data_0.len(), 0);
                let mut persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates(
-                       broadcaster_1, &chanmon_cfgs[1].fee_estimator).unwrap();
+                       &broadcaster_1, &&chanmon_cfgs[1].fee_estimator).unwrap();
                assert_eq!(persisted_chan_data_1.len(), 0);
 
                // Helper to make sure the channel is on the expected update ID.
                macro_rules! check_persisted_data {
                        ($expected_update_id: expr) => {
                                persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates(
-                                       broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
+                                       &broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
                                // check that we stored only one monitor
                                assert_eq!(persisted_chan_data_0.len(), 1);
                                for (_, mon) in persisted_chan_data_0.iter() {
@@ -978,7 +970,7 @@ mod tests {
                                        }
                                }
                                persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates(
-                                       broadcaster_1, &chanmon_cfgs[1].fee_estimator).unwrap();
+                                       &broadcaster_1, &&chanmon_cfgs[1].fee_estimator).unwrap();
                                assert_eq!(persisted_chan_data_1.len(), 1);
                                for (_, mon) in persisted_chan_data_1.iter() {
                                        assert_eq!(mon.get_latest_update_id(), $expected_update_id);
@@ -1043,7 +1035,7 @@ mod tests {
                check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID);
 
                // Make sure the expected number of stale updates is present.
-               let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
+               let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
                let (_, monitor) = &persisted_chan_data[0];
                let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
                // The channel should have 0 updates, as it wrote a full monitor and consolidated.
@@ -1151,7 +1143,7 @@ mod tests {
 
                // Check that the persisted channel data is empty before any channels are
                // open.
-               let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
+               let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
                assert_eq!(persisted_chan_data.len(), 0);
 
                // Create some initial channel
@@ -1162,7 +1154,7 @@ mod tests {
                send_payment(&nodes[1], &vec![&nodes[0]][..], 4_000_000);
 
                // Get the monitor and make a fake stale update at update_id=1 (lowest height of an update possible)
-               let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
+               let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
                let (_, monitor) = &persisted_chan_data[0];
                let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
                persister_0
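
Besides switching the fee estimator to a plain reference, `MonitorUpdatingPersister` sheds per-method bounds that only restated what the impl block already requires (`ES::Target: EntropySource`, `SP::Target: SignerProvider`). A tiny illustration of why those repeats were unnecessary:

```rust
use std::fmt::Display;

struct Wrapper<T>(T);

impl<T: Display> Wrapper<T> {
    // No extra `where T: Display` needed here; the impl-level bound already applies.
    fn describe(&self) -> String { format!("{}", self.0) }
}

fn main() {
    assert_eq!(Wrapper(42u32).describe(), "42");
}
```
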
diff --git a/lightning/src/util/ser.rs b/lightning/src/util/ser.rs
index af4de88a1a7dda59145b1ea25fa7b7e434519f76..7971f35fc97ce6ef0c8ec9c5dba4348cfdce697b 100644
@@ -17,7 +17,7 @@ use crate::prelude::*;
 use crate::io::{self, Read, Seek, Write};
 use crate::io_extras::{copy, sink};
 use core::hash::Hash;
-use crate::sync::Mutex;
+use crate::sync::{Mutex, RwLock};
 use core::cmp;
 use core::convert::TryFrom;
 use core::ops::Deref;
@@ -1195,6 +1195,18 @@ impl<T: Writeable> Writeable for Mutex<T> {
        }
 }
 
+impl<T: Readable> Readable for RwLock<T> {
+       fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
+               let t: T = Readable::read(r)?;
+               Ok(RwLock::new(t))
+       }
+}
+impl<T: Writeable> Writeable for RwLock<T> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+               self.read().unwrap().write(w)
+       }
+}
+
 impl<A: Readable, B: Readable> Readable for (A, B) {
        fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
                let a: A = Readable::read(r)?;
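
The new `RwLock` serialization impls write through a shared read guard and wrap the decoded value in a fresh lock. A toy mirror of that round trip using plain std and a fixed-width integer encoding instead of LDK's `Writer`/`Reader` plumbing:

```rust
use std::sync::RwLock;

fn write_locked(value: &RwLock<u32>) -> Vec<u8> {
    // Mirrors `self.read().unwrap().write(w)` from the impl above: shared access suffices.
    value.read().unwrap().to_be_bytes().to_vec()
}

fn read_locked(bytes: &[u8]) -> RwLock<u32> {
    // Mirrors `Ok(RwLock::new(t))`: decode the inner value, then wrap it in a new lock.
    let mut buf = [0u8; 4];
    buf.copy_from_slice(&bytes[..4]);
    RwLock::new(u32::from_be_bytes(buf))
}

fn main() {
    let original = RwLock::new(42u32);
    let restored = read_locked(&write_locked(&original));
    assert_eq!(*original.read().unwrap(), *restored.read().unwrap());
}
```
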
diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs
index 4bcaca57f81ca61cd2af4d499d6ffbfa4f1b51e0..bd66e4cae3e56fa70195a4627bf38451bac37d9c 100644
@@ -140,10 +140,10 @@ impl<'a> Router for TestRouter<'a> {
                                                // Since the path is reversed, the last element in our iteration is the first
                                                // hop.
                                                if idx == path.hops.len() - 1 {
-                                                       scorer.channel_penalty_msat(hop.short_channel_id, &NodeId::from_pubkey(payer), &NodeId::from_pubkey(&hop.pubkey), usage, &());
+                                                       scorer.channel_penalty_msat(hop.short_channel_id, &NodeId::from_pubkey(payer), &NodeId::from_pubkey(&hop.pubkey), usage, &Default::default());
                                                } else {
                                                        let curr_hop_path_idx = path.hops.len() - 1 - idx;
-                                                       scorer.channel_penalty_msat(hop.short_channel_id, &NodeId::from_pubkey(&path.hops[curr_hop_path_idx - 1].pubkey), &NodeId::from_pubkey(&hop.pubkey), usage, &());
+                                                       scorer.channel_penalty_msat(hop.short_channel_id, &NodeId::from_pubkey(&path.hops[curr_hop_path_idx - 1].pubkey), &NodeId::from_pubkey(&hop.pubkey), usage, &Default::default());
                                                }
                                        }
                                }
@@ -153,7 +153,7 @@ impl<'a> Router for TestRouter<'a> {
                let logger = TestLogger::new();
                find_route(
                        payer, params, &self.network_graph, first_hops, &logger,
-                       &ScorerAccountingForInFlightHtlcs::new(self.scorer.read().unwrap(), &inflight_htlcs), &(),
+                       &ScorerAccountingForInFlightHtlcs::new(self.scorer.read().unwrap(), &inflight_htlcs), &Default::default(),
                        &[42; 32]
                )
        }
@@ -423,7 +423,7 @@ impl<Signer: sign::WriteableEcdsaChannelSigner> chainmonitor::Persist<Signer> fo
                chain::ChannelMonitorUpdateStatus::Completed
        }
 
-       fn update_persisted_channel(&self, funding_txo: OutPoint, update: Option<&channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<Signer>, update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
+       fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<Signer>, update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
                let mut ret = chain::ChannelMonitorUpdateStatus::Completed;
                if let Some(update_ret) = self.update_rets.lock().unwrap().pop_front() {
                        ret = update_ret;