Expose the historical success probability calculation itself
[rust-lightning] / lightning / src / ln / channelmanager.rs
index 951ce9787c7ba44602d99db276f03d29054e7ec8..565eb83ef8d5cad0cc3e4846efa87e58d470a979 100644
@@ -46,7 +46,7 @@ use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, No
 use crate::ln::features::InvoiceFeatures;
 use crate::routing::gossip::NetworkGraph;
 use crate::routing::router::{BlindedTail, DefaultRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteHop, RouteParameters, Router};
-use crate::routing::scoring::ProbabilisticScorer;
+use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
 use crate::ln::msgs;
 use crate::ln::onion_utils;
 use crate::ln::onion_utils::HTLCFailReason;
@@ -501,9 +501,11 @@ struct ClaimablePayments {
 /// for some reason. They are handled in timer_tick_occurred, so may be processed with
 /// quite some time lag.
 enum BackgroundEvent {
-       /// Handle a ChannelMonitorUpdate that closes a channel, broadcasting its current latest holder
-       /// commitment transaction.
-       ClosingMonitorUpdate((OutPoint, ChannelMonitorUpdate)),
+       /// Handle a ChannelMonitorUpdate
+       ///
+       /// Note that any such events are lost on shutdown, so in general they must be updates which
+       /// are regenerated on startup.
+       MonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
 }
 
 #[derive(Debug)]
@@ -623,7 +625,9 @@ pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
        Arc<DefaultRouter<
                Arc<NetworkGraph<Arc<L>>>,
                Arc<L>,
-               Arc<Mutex<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>
+               Arc<Mutex<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>,
+               ProbabilisticScoringFeeParameters,
+               ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>,
        >>,
        Arc<L>
 >;
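
With the two added type parameters, a `DefaultRouter` matching this alias is now built with the scorer's fee parameters passed alongside the scorer itself. A minimal sketch under that assumption (the `DefaultRouter::new` argument list and the `ProbabilisticScoringDecayParameters` constructor argument are assumptions, not shown in this diff):

    // Hypothetical wiring; constructor names and argument order are assumptions.
    let network_graph = Arc::new(NetworkGraph::new(Network::Bitcoin, Arc::clone(&logger)));
    let scorer = Arc::new(Mutex::new(ProbabilisticScorer::new(
        ProbabilisticScoringDecayParameters::default(), Arc::clone(&network_graph), Arc::clone(&logger))));
    let router = DefaultRouter::new(
        Arc::clone(&network_graph), Arc::clone(&logger), keys_manager.get_secure_random_bytes(),
        Arc::clone(&scorer), ProbabilisticScoringFeeParameters::default());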
@@ -639,7 +643,7 @@ pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
 /// of [`KeysManager`] and [`DefaultRouter`].
 ///
 /// This is not exported to bindings users as Arcs don't make sense in bindings
-pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'c KeysManager, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>>, &'g L>;
+pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'c KeysManager, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>, ProbabilisticScoringFeeParameters, ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>, &'g L>;
 
 /// A trivial trait which describes any [`ChannelManager`] used in testing.
 #[cfg(any(test, feature = "_test_utils"))]
@@ -2728,10 +2732,9 @@ where
                let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv)
                        .map_err(|_| APIError::InvalidRoute{err: "Pubkey along hop was maliciously selected".to_owned()})?;
                let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, recipient_onion, cur_height, keysend_preimage)?;
-               if onion_utils::route_size_insane(&onion_payloads) {
-                       return Err(APIError::InvalidRoute{err: "Route size too large considering onion data".to_owned()});
-               }
-               let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash);
+
+               let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash)
+                       .map_err(|_| APIError::InvalidRoute { err: "Route size too large considering onion data".to_owned()})?;
 
                let err: Result<(), _> = loop {
                        let (counterparty_node_id, id) = match self.short_to_chan_info.read().unwrap().get(&path.hops.first().unwrap().short_channel_id) {
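
The `route_size_insane` pre-check disappears because the size validation now happens inside `construct_onion_packet` itself, which is why the call above maps its failure back into the same `APIError::InvalidRoute`. Roughly the signature shape this call site assumes (the payload type, visibility, and error type are assumptions, not part of this diff):

    // Hypothetical sketch of the now-fallible helper in onion_utils; internals elided.
    pub(crate) fn construct_onion_packet(
        payloads: Vec<msgs::OnionHopData>, onion_keys: Vec<OnionKeys>,
        prng_seed: [u8; 32], associated_data: &PaymentHash,
    ) -> Result<msgs::OnionPacket, ()> {
        // Err(()) when the serialized payloads would overflow the fixed-size onion,
        // replacing the separate route_size_insane() check.
        unimplemented!()
    }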
@@ -3774,7 +3777,7 @@ where
 
                for event in background_events.drain(..) {
                        match event {
-                               BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)) => {
+                               BackgroundEvent::MonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
                                        // The channel has already been closed, so no use bothering to care about the
                                        // monitor updating completing.
                                        let _ = self.chain_monitor.update_channel(funding_txo, &update);
@@ -5694,7 +5697,7 @@ where
                                if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
                                        assert!(should_broadcast);
                                } else { unreachable!(); }
-                               self.pending_background_events.lock().unwrap().push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)));
+                               self.pending_background_events.lock().unwrap().push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup((funding_txo, update)));
                        }
                        self.finish_force_close_channel(failure);
                }
@@ -7449,10 +7452,10 @@ where
                }
 
                // LDK versions prior to 0.0.116 wrote the `pending_background_events`
-               // `ClosingMonitorUpdate`s here, however there was never a reason to do so - the closing
-               // monitor updates were always effectively replayed on startup (either directly by calling
-               // `broadcast_latest_holder_commitment_txn` on a `ChannelMonitor` during deserialization
-               // or, in 0.0.115, by regenerating the monitor update itself).
+               // `MonitorUpdateRegeneratedOnStartup`s here, however there was never a reason to do so -
+               // the closing monitor updates were always effectively replayed on startup (either directly
+               // by calling `broadcast_latest_holder_commitment_txn` on a `ChannelMonitor` during
+               // deserialization or, in 0.0.115, by regenerating the monitor update itself).
                0u64.write(writer)?;
 
                // Prior to 0.0.111 we tracked node_announcement serials here, however that now happens in
@@ -7768,7 +7771,7 @@ where
                                                log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
                                        let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true);
                                        if let Some(monitor_update) = monitor_update {
-                                               pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate(monitor_update));
+                                               pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup(monitor_update));
                                        }
                                        failed_htlcs.append(&mut new_failed_htlcs);
                                        channel_closures.push_back((events::Event::ChannelClosed {
@@ -7843,7 +7846,7 @@ where
                                        update_id: CLOSED_CHANNEL_UPDATE_ID,
                                        updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
                                };
-                               pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((*funding_txo, monitor_update)));
+                               pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
                        }
                }
 
@@ -7900,7 +7903,7 @@ where
                for _ in 0..background_event_count {
                        match <u8 as Readable>::read(reader)? {
                                0 => {
-                                       // LDK versions prior to 0.0.116 wrote pending `ClosingMonitorUpdate`s here,
+                                       // LDK versions prior to 0.0.116 wrote pending `MonitorUpdateRegeneratedOnStartup`s here,
                                        // however we really don't (and never did) need them - we regenerate all
                                        // on-startup monitor updates.
                                        let _: OutPoint = Readable::read(reader)?;
@@ -8582,7 +8585,7 @@ mod tests {
                };
                let route = find_route(
                        &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
-                       None, nodes[0].logger, &scorer, &random_seed_bytes
+                       None, nodes[0].logger, &scorer, &(), &random_seed_bytes
                ).unwrap();
                nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
                        RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0)).unwrap();
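
The extra `&()` threaded into these test calls is the new score-parameters argument on `find_route`; the test scorer's parameter type is the unit type, so `&()` suffices. In non-test code the same slot would carry the probabilistic scorer's fee parameters, e.g. a sketch along these lines (variable names are illustrative):

    // Sketch: same call shape as above, but with real fee parameters instead of &().
    let score_params = ProbabilisticScoringFeeParameters::default();
    let route = find_route(
        &payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
        logger, &scorer, &score_params, &random_seed_bytes,
    )?;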
@@ -8616,7 +8619,7 @@ mod tests {
                let payment_preimage = PaymentPreimage([42; 32]);
                let route = find_route(
                        &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
-                       None, nodes[0].logger, &scorer, &random_seed_bytes
+                       None, nodes[0].logger, &scorer, &(), &random_seed_bytes
                ).unwrap();
                let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
                        RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0)).unwrap();
@@ -8679,7 +8682,7 @@ mod tests {
                let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
                let route = find_route(
                        &payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
-                       nodes[0].logger, &scorer, &random_seed_bytes
+                       nodes[0].logger, &scorer, &(), &random_seed_bytes
                ).unwrap();
 
                let test_preimage = PaymentPreimage([42; 32]);
@@ -8723,7 +8726,7 @@ mod tests {
                let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
                let route = find_route(
                        &payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
-                       nodes[0].logger, &scorer, &random_seed_bytes
+                       nodes[0].logger, &scorer, &(), &random_seed_bytes
                ).unwrap();
 
                let test_preimage = PaymentPreimage([42; 32]);
@@ -9263,7 +9266,7 @@ mod tests {
        }
 }
 
-#[cfg(all(any(test, feature = "_test_utils"), feature = "_bench_unstable"))]
+#[cfg(ldk_bench)]
 pub mod bench {
        use crate::chain::Listen;
        use crate::chain::chainmonitor::{ChainMonitor, Persist};
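
Since the benchmarks are now gated behind the custom `ldk_bench` cfg and driven by criterion rather than the removed `#[bench]`/`test::Bencher` machinery, they are invoked from an external criterion harness. A minimal sketch of such a harness (the file path and the `--cfg=ldk_bench` build flag are assumptions, not shown in this diff):

    // benches/bench.rs (hypothetical), built with RUSTFLAGS="--cfg=ldk_bench":
    use criterion::{criterion_group, criterion_main};
    use lightning::ln::channelmanager::bench::bench_sends;

    criterion_group!(benches, bench_sends);
    criterion_main!(benches);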
@@ -9283,7 +9286,7 @@ pub mod bench {
 
        use crate::sync::{Arc, Mutex};
 
-       use test::Bencher;
+       use criterion::Criterion;
 
        type Manager<'a, P> = ChannelManager<
                &'a ChainMonitor<InMemorySigner, &'a test_utils::TestChainSource,
@@ -9304,13 +9307,11 @@ pub mod bench {
                fn chain_monitor(&self) -> Option<&test_utils::TestChainMonitor> { None }
        }
 
-       #[cfg(test)]
-       #[bench]
-       fn bench_sends(bench: &mut Bencher) {
-               bench_two_sends(bench, test_utils::TestPersister::new(), test_utils::TestPersister::new());
+       pub fn bench_sends(bench: &mut Criterion) {
+               bench_two_sends(bench, "bench_sends", test_utils::TestPersister::new(), test_utils::TestPersister::new());
        }
 
-       pub fn bench_two_sends<P: Persist<InMemorySigner>>(bench: &mut Bencher, persister_a: P, persister_b: P) {
+       pub fn bench_two_sends<P: Persist<InMemorySigner>>(bench: &mut Criterion, bench_name: &str, persister_a: P, persister_b: P) {
                // Do a simple benchmark of sending a payment back and forth between two nodes.
                // Note that this is unrealistic as each payment send will require at least two fsync
                // calls per node.
@@ -9380,10 +9381,7 @@ pub mod bench {
 
                assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);
 
-               let block = Block {
-                       header: BlockHeader { version: 0x20000000, prev_blockhash: BestBlock::from_network(network).block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
-                       txdata: vec![tx],
-               };
+               let block = create_dummy_block(BestBlock::from_network(network).block_hash(), 42, vec![tx]);
                Listen::block_connected(&node_a, &block, 1);
                Listen::block_connected(&node_b, &block, 1);
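
The hand-rolled `Block` literal above is replaced by a shared test helper. Reconstructed from the deleted lines, `create_dummy_block` presumably looks roughly like this (the real helper lives in the test utilities and may differ in detail):

    // Sketch pieced together from the removed inline code; not the exact implementation.
    fn create_dummy_block(prev_blockhash: BlockHash, time: u32, txdata: Vec<Transaction>) -> Block {
        Block {
            header: BlockHeader {
                version: 0x20000000, prev_blockhash,
                merkle_root: TxMerkleNode::all_zeros(), time, bits: 42, nonce: 42,
            },
            txdata,
        }
    }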
 
@@ -9466,9 +9464,9 @@ pub mod bench {
                        }
                }
 
-               bench.iter(|| {
+               bench.bench_function(bench_name, |b| b.iter(|| {
                        send_payment!(node_a, node_b);
                        send_payment!(node_b, node_a);
-               });
+               }));
        }
 }