diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index d6cd6e24..b5e7d603 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -46,7 +46,7 @@ use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, No
 use crate::ln::features::InvoiceFeatures;
 use crate::routing::gossip::NetworkGraph;
 use crate::routing::router::{BlindedTail, DefaultRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteHop, RouteParameters, Router};
-use crate::routing::scoring::ProbabilisticScorer;
+use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
 use crate::ln::msgs;
 use crate::ln::onion_utils;
 use crate::ln::onion_utils::HTLCFailReason;
@@ -501,9 +501,11 @@ struct ClaimablePayments {
 /// for some reason. They are handled in timer_tick_occurred, so may be processed with
 /// quite some time lag.
 enum BackgroundEvent {
-	/// Handle a ChannelMonitorUpdate that closes a channel, broadcasting its current latest holder
-	/// commitment transaction.
-	ClosingMonitorUpdate((OutPoint, ChannelMonitorUpdate)),
+	/// Handle a ChannelMonitorUpdate
+	///
+	/// Note that any such events are lost on shutdown, so in general they must be updates which
+	/// are regenerated on startup.
+	MonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
 }
 
 #[derive(Debug)]
@@ -623,7 +625,9 @@ pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
 	Arc<DefaultRouter<
 		Arc<NetworkGraph<Arc<L>>>,
 		Arc<L>,
-		Arc<Mutex<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>
+		Arc<Mutex<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>,
+		ProbabilisticScoringFeeParameters,
+		ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>,
 	>>,
 	Arc<L>
 >;
@@ -639,7 +643,7 @@
 /// of [`KeysManager`] and [`DefaultRouter`].
 ///
 /// This is not exported to bindings users as Arcs don't make sense in bindings
-pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'c KeysManager, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>>, &'g L>;
+pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'c KeysManager, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>, ProbabilisticScoringFeeParameters, ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>, &'g L>;
 
 /// A trivial trait which describes any [`ChannelManager`] used in testing.
 #[cfg(any(test, feature = "_test_utils"))]
@@ -1270,8 +1274,14 @@ pub struct ChannelDetails {
 	/// the current state and per-HTLC limit(s). This is intended for use when routing, allowing us
 	/// to use a limit as close as possible to the HTLC limit we can currently send.
 	///
-	/// See also [`ChannelDetails::balance_msat`] and [`ChannelDetails::outbound_capacity_msat`].
+	/// See also [`ChannelDetails::next_outbound_htlc_minimum_msat`],
+	/// [`ChannelDetails::balance_msat`], and [`ChannelDetails::outbound_capacity_msat`].
 	pub next_outbound_htlc_limit_msat: u64,
+	/// The minimum value for sending a single HTLC to the remote peer. This is the equivalent of
+	/// [`ChannelDetails::next_outbound_htlc_limit_msat`] but represents a lower-bound, rather than
+	/// an upper-bound. This is intended for use when routing, allowing us to ensure we pick a
+	/// route which is valid.
+	pub next_outbound_htlc_minimum_msat: u64,
 	/// The available inbound capacity for the remote peer to send HTLCs to us. This does not
 	/// include any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
 	/// available for inclusion in new inbound HTLCs).
@@ -1391,6 +1401,7 @@ impl ChannelDetails {
 			inbound_capacity_msat: balance.inbound_capacity_msat,
 			outbound_capacity_msat: balance.outbound_capacity_msat,
 			next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
+			next_outbound_htlc_minimum_msat: balance.next_outbound_htlc_minimum_msat,
 			user_channel_id: channel.get_user_id(),
 			confirmations_required: channel.minimum_depth(),
 			confirmations: Some(channel.get_funding_tx_confirmations(best_block_height)),
@@ -2728,10 +2739,9 @@ where
 		let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv)
 			.map_err(|_| APIError::InvalidRoute{err: "Pubkey along hop was maliciously selected".to_owned()})?;
 		let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, recipient_onion, cur_height, keysend_preimage)?;
-		if onion_utils::route_size_insane(&onion_payloads) {
-			return Err(APIError::InvalidRoute{err: "Route size too large considering onion data".to_owned()});
-		}
-		let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash);
+
+		let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash)
+			.map_err(|_| APIError::InvalidRoute { err: "Route size too large considering onion data".to_owned()})?;
 
 		let err: Result<(), _> = loop {
 			let (counterparty_node_id, id) = match self.short_to_chan_info.read().unwrap().get(&path.hops.first().unwrap().short_channel_id) {
@@ -3090,6 +3100,12 @@ where
 			}
 		}
 		self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, |chan, tx| {
+			if tx.output.len() > u16::max_value() as usize {
+				return Err(APIError::APIMisuseError {
+					err: "Transaction had more than 2^16 outputs, which is not supported".to_owned()
+				});
+			}
+
 			let mut output_index = None;
 			let expected_spk = chan.get_funding_redeemscript().to_v0_p2wsh();
 			for (idx, outp) in tx.output.iter().enumerate() {
@@ -3099,11 +3115,6 @@ where
 						err: "Multiple outputs matched the expected script and value".to_owned()
 					});
 				}
-				if idx > u16::max_value() as usize {
-					return Err(APIError::APIMisuseError {
-						err: "Transaction had more than 2^16 outputs, which is not supported".to_owned()
-					});
-				}
 				output_index = Some(idx as u16);
 			}
 		}
@@ -3773,7 +3784,7 @@ where
 
 		for event in background_events.drain(..) {
 			match event {
-				BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)) => {
+				BackgroundEvent::MonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
 					// The channel has already been closed, so no use bothering to care about the
 					// monitor updating completing.
					let _ = self.chain_monitor.update_channel(funding_txo, &update);
@@ -4513,7 +4524,7 @@ where
 
 		if let Some(tx) = funding_broadcastable {
 			log_info!(self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
-			self.tx_broadcaster.broadcast_transaction(&tx);
+			self.tx_broadcaster.broadcast_transactions(&[&tx]);
 		}
 
 		{
@@ -5043,7 +5054,7 @@ where
 				};
 				if let Some(broadcast_tx) = tx {
 					log_info!(self.logger, "Broadcasting {}", log_tx!(broadcast_tx));
-					self.tx_broadcaster.broadcast_transaction(&broadcast_tx);
+					self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);
 				}
 				if let Some(chan) = chan_option {
 					if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
@@ -5653,7 +5664,7 @@ where
 				self.issue_channel_close_events(chan, ClosureReason::CooperativeClosure);
 
 				log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
-				self.tx_broadcaster.broadcast_transaction(&tx);
+				self.tx_broadcaster.broadcast_transactions(&[&tx]);
 				update_maps_on_chan_removal!(self, chan);
 				false
 			} else { true }
@@ -5693,7 +5704,7 @@ where
 			if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
 				assert!(should_broadcast);
 			} else { unreachable!(); }
-			self.pending_background_events.lock().unwrap().push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)));
+			self.pending_background_events.lock().unwrap().push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup((funding_txo, update)));
 		}
 		self.finish_force_close_channel(failure);
 	}
@@ -6947,10 +6958,9 @@ impl Writeable for ChannelDetails {
 			(14, user_channel_id_low, required),
 			(16, self.balance_msat, required),
 			(18, self.outbound_capacity_msat, required),
-			// Note that by the time we get past the required read above, outbound_capacity_msat will be
-			// filled in, so we can safely unwrap it here.
-			(19, self.next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)),
+			(19, self.next_outbound_htlc_limit_msat, required),
 			(20, self.inbound_capacity_msat, required),
+			(21, self.next_outbound_htlc_minimum_msat, required),
 			(22, self.confirmations_required, option),
 			(24, self.force_close_spend_delay, option),
 			(26, self.is_outbound, required),
@@ -6987,6 +6997,7 @@ impl Readable for ChannelDetails {
 			// filled in, so we can safely unwrap it here.
			(19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)),
 			(20, inbound_capacity_msat, required),
+			(21, next_outbound_htlc_minimum_msat, (default_value, 0)),
 			(22, confirmations_required, option),
 			(24, force_close_spend_delay, option),
 			(26, is_outbound, required),
@@ -7020,6 +7031,7 @@
 			balance_msat: balance_msat.0.unwrap(),
 			outbound_capacity_msat: outbound_capacity_msat.0.unwrap(),
 			next_outbound_htlc_limit_msat: next_outbound_htlc_limit_msat.0.unwrap(),
+			next_outbound_htlc_minimum_msat: next_outbound_htlc_minimum_msat.0.unwrap(),
 			inbound_capacity_msat: inbound_capacity_msat.0.unwrap(),
 			confirmations_required,
 			confirmations,
@@ -7447,17 +7459,12 @@ where
 			}
 		}
 
-		let background_events = self.pending_background_events.lock().unwrap();
-		(background_events.len() as u64).write(writer)?;
-		for event in background_events.iter() {
-			match event {
-				BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update)) => {
-					0u8.write(writer)?;
-					funding_txo.write(writer)?;
-					monitor_update.write(writer)?;
-				},
-			}
-		}
+		// LDK versions prior to 0.0.116 wrote the `pending_background_events`
+		// `MonitorUpdateRegeneratedOnStartup`s here, however there was never a reason to do so -
+		// the closing monitor updates were always effectively replayed on startup (either directly
+		// by calling `broadcast_latest_holder_commitment_txn` on a `ChannelMonitor` during
+		// deserialization or, in 0.0.115, by regenerating the monitor update itself).
+		0u64.write(writer)?;
 
 		// Prior to 0.0.111 we tracked node_announcement serials here, however that now happens in
 		// `PeerManager`, and thus we simply write the `highest_seen_timestamp` twice, which is
@@ -7772,7 +7779,7 @@ where
 					log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
 				let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true);
 				if let Some(monitor_update) = monitor_update {
-					pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate(monitor_update));
+					pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup(monitor_update));
 				}
 				failed_htlcs.append(&mut new_failed_htlcs);
 				channel_closures.push_back((events::Event::ChannelClosed {
@@ -7847,7 +7854,7 @@ where
 					update_id: CLOSED_CHANNEL_UPDATE_ID,
 					updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
 				};
-				pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((*funding_txo, monitor_update)));
+				pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
 			}
 		}
 
@@ -7904,13 +7911,11 @@ where
 		for _ in 0..background_event_count {
 			match <u8 as Readable>::read(reader)? {
 				0 => {
-					let (funding_txo, monitor_update): (OutPoint, ChannelMonitorUpdate) = (Readable::read(reader)?, Readable::read(reader)?);
-					if pending_background_events.iter().find(|e| {
-						let BackgroundEvent::ClosingMonitorUpdate((pending_funding_txo, pending_monitor_update)) = e;
-						*pending_funding_txo == funding_txo && *pending_monitor_update == monitor_update
-					}).is_none() {
-						pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update)));
-					}
+					// LDK versions prior to 0.0.116 wrote pending `MonitorUpdateRegeneratedOnStartup`s here,
+					// however we really don't (and never did) need them - we regenerate all
+					// on-startup monitor updates.
+					let _: OutPoint = Readable::read(reader)?;
+					let _: ChannelMonitorUpdate = Readable::read(reader)?;
 				}
 				_ => return Err(DecodeError::InvalidValue),
 			}
 		}
@@ -8588,7 +8593,7 @@ mod tests {
 		};
 		let route = find_route(
 			&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
-			None, nodes[0].logger, &scorer, &random_seed_bytes
+			None, nodes[0].logger, &scorer, &(), &random_seed_bytes
 		).unwrap();
 		nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0)).unwrap();
 
@@ -8622,7 +8627,7 @@ mod tests {
 		let payment_preimage = PaymentPreimage([42; 32]);
 		let route = find_route(
 			&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
-			None, nodes[0].logger, &scorer, &random_seed_bytes
+			None, nodes[0].logger, &scorer, &(), &random_seed_bytes
 		).unwrap();
 		let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0)).unwrap();
 
@@ -8685,7 +8690,7 @@ mod tests {
 		let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
 		let route = find_route(
 			&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
-			nodes[0].logger, &scorer, &random_seed_bytes
+			nodes[0].logger, &scorer, &(), &random_seed_bytes
 		).unwrap();
 
 		let test_preimage = PaymentPreimage([42; 32]);
@@ -8729,7 +8734,7 @@ mod tests {
 		let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
 		let route = find_route(
 			&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
-			nodes[0].logger, &scorer, &random_seed_bytes
+			nodes[0].logger, &scorer, &(), &random_seed_bytes
 		).unwrap();
 
 		let test_preimage = PaymentPreimage([42; 32]);
@@ -9269,7 +9274,7 @@ mod tests {
 	}
 }
 
-#[cfg(all(any(test, feature = "_test_utils"), feature = "_bench_unstable"))]
+#[cfg(ldk_bench)]
 pub mod bench {
 	use crate::chain::Listen;
 	use crate::chain::chainmonitor::{ChainMonitor, Persist};
@@ -9289,7 +9294,7 @@ pub mod bench {
 
 	use crate::sync::{Arc, Mutex};
 
-	use test::Bencher;
+	use criterion::Criterion;
 
 	type Manager<'a, P> = ChannelManager<
 		&'a ChainMonitor<InMemorySigner, &'a test_utils::TestChainSource,
@@ -9306,13 +9311,11 @@ pub mod bench {
 		fn chain_monitor(&self) -> Option<&test_utils::TestChainMonitor> { None }
 	}
 
-	#[cfg(test)]
-	#[bench]
-	fn bench_sends(bench: &mut Bencher) {
-		bench_two_sends(bench, test_utils::TestPersister::new(), test_utils::TestPersister::new());
+	pub fn bench_sends(bench: &mut Criterion) {
+		bench_two_sends(bench, "bench_sends", test_utils::TestPersister::new(), test_utils::TestPersister::new());
 	}
 
-	pub fn bench_two_sends<P: Persist<InMemorySigner>>(bench: &mut Bencher, persister_a: P, persister_b: P) {
+	pub fn bench_two_sends<P: Persist<InMemorySigner>>(bench: &mut Criterion, bench_name: &str, persister_a: P, persister_b: P) {
 		// Do a simple benchmark of sending a payment back and forth between two nodes.
 		// Note that this is unrealistic as each payment send will require at least two fsync
 		// calls per node.
@@ -9386,10 +9389,7 @@ pub mod bench {
 
 		assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);
 
-		let block = Block {
-			header: BlockHeader { version: 0x20000000, prev_blockhash: BestBlock::from_network(network).block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
-			txdata: vec![tx],
-		};
+		let block = create_dummy_block(BestBlock::from_network(network).block_hash(), 42, vec![tx]);
 		Listen::block_connected(&node_a, &block, 1);
 		Listen::block_connected(&node_b, &block, 1);
@@ -9472,9 +9472,9 @@
 			}
 		}
 
-		bench.iter(|| {
+		bench.bench_function(bench_name, |b| b.iter(|| {
 			send_payment!(node_a, node_b);
 			send_payment!(node_b, node_a);
-		});
+		}));
 	}
 }
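
A minimal usage sketch (not part of the patch above), showing how a caller might combine the new `ChannelDetails::next_outbound_htlc_minimum_msat` lower bound with the existing `next_outbound_htlc_limit_msat` upper bound when pre-filtering first-hop channels before routing. The `usable_first_hops` helper and its inputs are hypothetical; only the two `ChannelDetails` fields come from this diff.

	use lightning::ln::channelmanager::ChannelDetails;

	// Hypothetical helper: keep only channels whose current per-HTLC bounds
	// admit a payment of `amt_msat` as a single HTLC. The minimum is the
	// lower bound added by this patch; the limit is the existing upper bound.
	fn usable_first_hops(first_hops: Vec<ChannelDetails>, amt_msat: u64) -> Vec<ChannelDetails> {
		first_hops.into_iter()
			.filter(|chan| chan.is_usable
				&& amt_msat >= chan.next_outbound_htlc_minimum_msat
				&& amt_msat <= chan.next_outbound_htlc_limit_msat)
			.collect()
	}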