From: Matt Corallo <649246+TheBlueMatt@users.noreply.github.com> Date: Wed, 26 Aug 2020 15:14:34 +0000 (-0700) Subject: Merge pull request #676 from TheBlueMatt/2020-08-c-bindings-cleanups-3 X-Git-Tag: v0.0.12~32 X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=commitdiff_plain;h=3defcc896266f3d67848ce28981a756e971a3f0c;hp=af69fae97bcfeb3d9b3b4b84d37240f45e37bb85;p=rust-lightning Merge pull request #676 from TheBlueMatt/2020-08-c-bindings-cleanups-3 Pre-C-Bindings Cleanups #3 --- diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 65f721e4..3191eb27 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -235,7 +235,7 @@ pub fn do_test(data: &[u8], out: Out) { tx_broadcaster: broadcast.clone(), logger, default_config: config, - channel_monitors: &mut monitor_refs, + channel_monitors: monitor_refs, }; (<(BlockHash, ChannelManager, Arc, Arc, Arc, Arc>)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, monitor) diff --git a/fuzz/src/router.rs b/fuzz/src/router.rs index 38e29de5..ffc46083 100644 --- a/fuzz/src/router.rs +++ b/fuzz/src/router.rs @@ -236,7 +236,10 @@ pub fn do_test(data: &[u8], out: Out) { } &last_hops_vec[..] }; - let _ = get_route(&our_pubkey, &net_graph_msg_handler.network_graph.read().unwrap(), &target, first_hops, last_hops, slice_to_be64(get_slice!(8)), slice_to_be32(get_slice!(4)), Arc::clone(&logger)); + let _ = get_route(&our_pubkey, &net_graph_msg_handler.network_graph.read().unwrap(), &target, + first_hops.map(|c| c.iter().collect::>()).as_ref().map(|a| a.as_slice()), + &last_hops.iter().collect::>(), + slice_to_be64(get_slice!(8)), slice_to_be32(get_slice!(4)), Arc::clone(&logger)); }, _ => return, } diff --git a/lightning/src/chain/chaininterface.rs b/lightning/src/chain/chaininterface.rs index 644c3214..fe391454 100644 --- a/lightning/src/chain/chaininterface.rs +++ b/lightning/src/chain/chaininterface.rs @@ -245,13 +245,15 @@ pub type BlockNotifierRef<'a, C> = BlockNotifier<'a, &'a ChainListener, C>; /// or a BlockNotifierRef for conciseness. See their documentation for more details, but essentially /// you should default to using a BlockNotifierRef, and use a BlockNotifierArc instead when you /// require ChainListeners with static lifetimes, such as when you're using lightning-net-tokio. -pub struct BlockNotifier<'a, CL: Deref + 'a, C: Deref> where C::Target: ChainWatchInterface { +pub struct BlockNotifier<'a, CL: Deref + 'a, C: Deref> + where CL::Target: ChainListener + 'a, C::Target: ChainWatchInterface { listeners: Mutex>, chain_monitor: C, phantom: PhantomData<&'a ()>, } -impl<'a, CL: Deref + 'a, C: Deref> BlockNotifier<'a, CL, C> where C::Target: ChainWatchInterface { +impl<'a, CL: Deref + 'a, C: Deref> BlockNotifier<'a, CL, C> + where CL::Target: ChainListener + 'a, C::Target: ChainWatchInterface { /// Constructs a new BlockNotifier without any listeners. pub fn new(chain_monitor: C) -> BlockNotifier<'a, CL, C> { BlockNotifier { diff --git a/lightning/src/chain/keysinterface.rs b/lightning/src/chain/keysinterface.rs index e564fa7c..5571c8b4 100644 --- a/lightning/src/chain/keysinterface.rs +++ b/lightning/src/chain/keysinterface.rs @@ -11,7 +11,7 @@ //! spendable on-chain outputs which the user owns and is responsible for using just as any other //! on-chain output which is theirs. 
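The chaininterface.rs hunk above adds the missing `CL::Target: ChainListener + 'a` bound so BlockNotifier works both with plain references (BlockNotifierRef) and with Arcs (BlockNotifierArc). A minimal, self-contained sketch of the same pattern outside the crate, bounding the Deref target rather than the Deref type itself, with hypothetical names:

use std::ops::Deref;
use std::sync::Arc;

trait ChainListener {
    fn block_connected(&self);
}

// Mirrors the BlockNotifier fix: the listener bound is stated on the Deref
// *target*, so both `&Listener` and `Arc<Listener>` satisfy it.
struct Notifier<CL: Deref> where CL::Target: ChainListener {
    listeners: Vec<CL>,
}

impl<CL: Deref> Notifier<CL> where CL::Target: ChainListener {
    fn notify(&self) {
        for listener in &self.listeners {
            listener.block_connected();
        }
    }
}

struct PrintListener;
impl ChainListener for PrintListener {
    fn block_connected(&self) { println!("block connected"); }
}

fn main() {
    // Works with plain references (the BlockNotifierRef style)...
    let l = PrintListener;
    Notifier { listeners: vec![&l] }.notify();
    // ...and with Arc (the BlockNotifierArc style), thanks to the target bound.
    Notifier { listeners: vec![Arc::new(PrintListener)] }.notify();
}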
-use bitcoin::blockdata::transaction::{Transaction, OutPoint, TxOut}; +use bitcoin::blockdata::transaction::{Transaction, TxOut}; use bitcoin::blockdata::script::{Script, Builder}; use bitcoin::blockdata::opcodes; use bitcoin::network::constants::Network; @@ -31,9 +31,10 @@ use bitcoin::secp256k1; use util::byte_utils; use util::ser::{Writeable, Writer, Readable}; +use chain::transaction::OutPoint; use ln::chan_utils; use ln::chan_utils::{HTLCOutputInCommitment, make_funding_redeemscript, ChannelPublicKeys, LocalCommitmentTransaction, PreCalculatedTxCreationKeys}; -use ln::msgs; +use ln::msgs::UnsignedChannelAnnouncement; use std::sync::atomic::{AtomicUsize, Ordering}; use std::io::Error; @@ -316,7 +317,7 @@ pub trait ChannelKeys : Send+Clone { /// Note that if this fails or is rejected, the channel will not be publicly announced and /// our counterparty may (though likely will not) close the channel on us for violating the /// protocol. - fn sign_channel_announcement(&self, msg: &msgs::UnsignedChannelAnnouncement, secp_ctx: &Secp256k1) -> Result; + fn sign_channel_announcement(&self, msg: &UnsignedChannelAnnouncement, secp_ctx: &Secp256k1) -> Result; /// Set the remote channel basepoints and remote/local to_self_delay. /// This is done immediately on incoming channels and as soon as the channel is accepted on outgoing channels. @@ -582,7 +583,7 @@ impl ChannelKeys for InMemoryChannelKeys { Ok(secp_ctx.sign(&sighash, &self.funding_key)) } - fn sign_channel_announcement(&self, msg: &msgs::UnsignedChannelAnnouncement, secp_ctx: &Secp256k1) -> Result { + fn sign_channel_announcement(&self, msg: &UnsignedChannelAnnouncement, secp_ctx: &Secp256k1) -> Result { let msghash = hash_to_message!(&Sha256dHash::hash(&msg.encode()[..])[..]); Ok(secp_ctx.sign(&msghash, &self.funding_key)) } @@ -808,7 +809,7 @@ impl KeysInterface for KeysManager { self.shutdown_pubkey.clone() } - fn get_channel_keys(&self, _inbound: bool, channel_value_satoshis: u64) -> InMemoryChannelKeys { + fn get_channel_keys(&self, _inbound: bool, channel_value_satoshis: u64) -> Self::ChanKeySigner { let child_ix = self.channel_child_index.fetch_add(1, Ordering::AcqRel); let ix_and_nanos: u64 = (child_ix as u64) << 32 | (self.starting_time_nanos as u64); self.derive_channel_keys(channel_value_satoshis, ix_and_nanos, self.starting_time_secs) diff --git a/lightning/src/ln/chan_utils.rs b/lightning/src/ln/chan_utils.rs index e315398d..998ac9cd 100644 --- a/lightning/src/ln/chan_utils.rs +++ b/lightning/src/ln/chan_utils.rs @@ -30,6 +30,7 @@ use util::byte_utils; use bitcoin::secp256k1::key::{SecretKey, PublicKey}; use bitcoin::secp256k1::{Secp256k1, Signature}; +use bitcoin::secp256k1::Error as SecpError; use bitcoin::secp256k1; use std::{cmp, mem}; @@ -357,7 +358,7 @@ impl_writeable!(ChannelPublicKeys, 33*5, { impl TxCreationKeys { /// Create a new TxCreationKeys from channel base points and the per-commitment point - pub fn new(secp_ctx: &Secp256k1, per_commitment_point: &PublicKey, a_delayed_payment_base: &PublicKey, a_htlc_base: &PublicKey, b_revocation_base: &PublicKey, b_htlc_base: &PublicKey) -> Result { + pub fn derive_new(secp_ctx: &Secp256k1, per_commitment_point: &PublicKey, a_delayed_payment_base: &PublicKey, a_htlc_base: &PublicKey, b_revocation_base: &PublicKey, b_htlc_base: &PublicKey) -> Result { Ok(TxCreationKeys { per_commitment_point: per_commitment_point.clone(), revocation_key: derive_public_revocation_key(&secp_ctx, &per_commitment_point, &b_revocation_base)?, diff --git a/lightning/src/ln/channel.rs 
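The chan_utils.rs hunk above renames TxCreationKeys::new to derive_new, presumably to signal a fallible key-derivation step to the C bindings, and imports secp256k1::Error as SecpError for its error type. A hedged sketch of a call site under the renamed API; the basepoints here are placeholders derived from dummy secrets, not meaningful channel keys:

use bitcoin::secp256k1::key::{PublicKey, SecretKey};
use bitcoin::secp256k1::{Error as SecpError, Secp256k1};
use lightning::ln::chan_utils::TxCreationKeys;

fn derive_keys_example() -> Result<TxCreationKeys, SecpError> {
    let secp_ctx = Secp256k1::new();
    // Placeholder basepoints; real callers use the channel's per-commitment
    // point plus both parties' delayed-payment/HTLC/revocation basepoints.
    let dummy = |b: u8| PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[b; 32]).unwrap());
    let (per_commitment_point, a_delayed, a_htlc, b_revocation, b_htlc) =
        (dummy(1), dummy(2), dummy(3), dummy(4), dummy(5));
    // Renamed from TxCreationKeys::new in this patch; still returns Result<_, SecpError>.
    TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, &a_delayed, &a_htlc, &b_revocation, &b_htlc)
}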
b/lightning/src/ln/channel.rs index f7f13827..066f71c5 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1126,7 +1126,7 @@ impl Channel { let htlc_basepoint = &self.local_keys.pubkeys().htlc_basepoint; let their_pubkeys = self.their_pubkeys.as_ref().unwrap(); - Ok(secp_check!(TxCreationKeys::new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &their_pubkeys.revocation_basepoint, &their_pubkeys.htlc_basepoint), "Local tx keys generation got bogus keys".to_owned())) + Ok(secp_check!(TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &their_pubkeys.revocation_basepoint, &their_pubkeys.htlc_basepoint), "Local tx keys generation got bogus keys".to_owned())) } #[inline] @@ -1140,7 +1140,7 @@ impl Channel { let htlc_basepoint = &self.local_keys.pubkeys().htlc_basepoint; let their_pubkeys = self.their_pubkeys.as_ref().unwrap(); - Ok(secp_check!(TxCreationKeys::new(&self.secp_ctx, &self.their_cur_commitment_point.unwrap(), &their_pubkeys.delayed_payment_basepoint, &their_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint), "Remote tx keys generation got bogus keys".to_owned())) + Ok(secp_check!(TxCreationKeys::derive_new(&self.secp_ctx, &self.their_cur_commitment_point.unwrap(), &their_pubkeys.delayed_payment_basepoint, &their_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint), "Remote tx keys generation got bogus keys".to_owned())) } /// Gets the redeemscript for the funding transaction output (ie the funding transaction output @@ -4673,7 +4673,7 @@ mod tests { let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap(); let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret); let htlc_basepoint = &chan.local_keys.pubkeys().htlc_basepoint; - let keys = TxCreationKeys::new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &their_pubkeys.revocation_basepoint, &their_pubkeys.htlc_basepoint).unwrap(); + let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &their_pubkeys.revocation_basepoint, &their_pubkeys.htlc_basepoint).unwrap(); chan.their_pubkeys = Some(their_pubkeys); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index fdbbb43d..6116a49e 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -42,10 +42,12 @@ use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpd use ln::features::{InitFeatures, NodeFeatures}; use routing::router::{Route, RouteHop}; use ln::msgs; +use ln::msgs::NetAddress; use ln::onion_utils; use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, OptionalField}; use chain::keysinterface::{ChannelKeys, KeysInterface, KeysManager, InMemoryChannelKeys}; use util::config::UserConfig; +use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider}; use util::{byte_utils, events}; use util::ser::{Readable, ReadableArgs, MaybeReadable, Writeable, Writer}; use util::chacha20::{ChaCha20, ChaChaReader}; @@ -312,7 +314,7 @@ pub(super) struct ChannelHolder { claimable_htlcs: HashMap<(PaymentHash, Option), Vec>, /// Messages to send to peers - pushed to in the same lock that they are generated in (except /// for broadcast messages, where ordering isn't as strict). 
- pub(super) pending_msg_events: Vec, + pub(super) pending_msg_events: Vec, } /// State we hold per-peer. In the future we should put channels in here, but for now we only hold @@ -1484,7 +1486,7 @@ impl // be absurd. We ensure this by checking that at least 500 (our stated public contract on when // broadcast_node_announcement panics) of the maximum-length addresses would fit in a 64KB // message... - const HALF_MESSAGE_IS_ADDRS: u32 = ::std::u16::MAX as u32 / (msgs::NetAddress::MAX_LEN as u32 + 1) / 2; + const HALF_MESSAGE_IS_ADDRS: u32 = ::std::u16::MAX as u32 / (NetAddress::MAX_LEN as u32 + 1) / 2; #[deny(const_err)] #[allow(dead_code)] // ...by failing to compile if the number of addresses that would be half of a message is @@ -1504,7 +1506,7 @@ impl /// only Tor Onion addresses. /// /// Panics if addresses is absurdly large (more than 500). - pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], addresses: Vec) { + pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], addresses: Vec) { let _ = self.total_consistency_lock.read().unwrap(); if addresses.len() > 500 { @@ -3011,14 +3013,14 @@ impl } } -impl events::MessageSendEventsProvider for ChannelManager +impl MessageSendEventsProvider for ChannelManager where M::Target: ManyChannelMonitor, T::Target: BroadcasterInterface, K::Target: KeysInterface, F::Target: FeeEstimator, L::Target: Logger, { - fn get_and_clear_pending_msg_events(&self) -> Vec { + fn get_and_clear_pending_msg_events(&self) -> Vec { //TODO: This behavior should be documented. It's non-intuitive that we query // ChannelMonitors when clearing other events. self.process_pending_monitor_events(); @@ -3030,14 +3032,14 @@ impl } } -impl events::EventsProvider for ChannelManager +impl EventsProvider for ChannelManager where M::Target: ManyChannelMonitor, T::Target: BroadcasterInterface, K::Target: KeysInterface, F::Target: FeeEstimator, L::Target: Logger, { - fn get_and_clear_pending_events(&self) -> Vec { + fn get_and_clear_pending_events(&self) -> Vec { //TODO: This behavior should be documented. It's non-intuitive that we query // ChannelMonitors when clearing other events. self.process_pending_monitor_events(); @@ -3774,7 +3776,27 @@ pub struct ChannelManagerReadArgs<'a, ChanSigner: 'a + ChannelKeys, M: Deref, T: /// /// In such cases the latest local transactions will be sent to the tx_broadcaster included in /// this struct. - pub channel_monitors: &'a mut HashMap>, + pub channel_monitors: HashMap>, +} + +impl<'a, ChanSigner: 'a + ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> + ChannelManagerReadArgs<'a, ChanSigner, M, T, K, F, L> + where M::Target: ManyChannelMonitor, + T::Target: BroadcasterInterface, + K::Target: KeysInterface, + F::Target: FeeEstimator, + L::Target: Logger, + { + /// Simple utility function to create a ChannelManagerReadArgs which creates the monitor + /// HashMap for you. This is primarily useful for C bindings where it is not practical to + /// populate a HashMap directly from C. 
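channelmanager.rs above now imports Event, EventsProvider, MessageSendEvent and MessageSendEventsProvider directly, and the two provider impls keep returning the pending Event and MessageSendEvent lists. A short sketch of draining both providers, written against those trait methods; the SpendableOutputs match arm is grounded in the variant used later in this patch, the rest is placeholder handling:

use lightning::util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};

// Generic over any provider, so the same loop serves a ChannelManager or a
// ManyChannelMonitor implementation.
fn drain_pending<P: EventsProvider + MessageSendEventsProvider>(provider: &P) {
    for event in provider.get_and_clear_pending_events() {
        match event {
            Event::SpendableOutputs { outputs } => {
                // Persist the descriptors so the on-chain funds can be swept later.
                let _ = outputs;
            }
            _ => {}
        }
    }
    // Hand these to the message handler / PeerManager as usual.
    let _msgs: Vec<MessageSendEvent> = provider.get_and_clear_pending_msg_events();
}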
+ pub fn new(keys_manager: K, fee_estimator: F, monitor: M, tx_broadcaster: T, logger: L, default_config: UserConfig, + mut channel_monitors: Vec<&'a mut ChannelMonitor>) -> Self { + Self { + keys_manager, fee_estimator, monitor, tx_broadcaster, logger, default_config, + channel_monitors: channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect() + } + } } // Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the @@ -3801,7 +3823,7 @@ impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: De F::Target: FeeEstimator, L::Target: Logger, { - fn read(reader: &mut R, args: ChannelManagerReadArgs<'a, ChanSigner, M, T, K, F, L>) -> Result { + fn read(reader: &mut R, mut args: ChannelManagerReadArgs<'a, ChanSigner, M, T, K, F, L>) -> Result { let _ver: u8 = Readable::read(reader)?; let min_ver: u8 = Readable::read(reader)?; if min_ver > SERIALIZATION_VERSION { diff --git a/lightning/src/ln/channelmonitor.rs b/lightning/src/ln/channelmonitor.rs index 060e88fa..570982fb 100644 --- a/lightning/src/ln/channelmonitor.rs +++ b/lightning/src/ln/channelmonitor.rs @@ -47,11 +47,13 @@ use chain::keysinterface::{SpendableOutputDescriptor, ChannelKeys}; use util::logger::Logger; use util::ser::{Readable, MaybeReadable, Writer, Writeable, U48}; use util::{byte_utils, events}; +use util::events::Event; use std::collections::{HashMap, hash_map}; use std::sync::Mutex; use std::{hash,cmp, mem}; use std::ops::Deref; +use std::io::Error; /// An update generated by the underlying Channel itself which contains some new information the /// ChannelMonitor should be made aware of. @@ -317,7 +319,7 @@ impl Vec { + fn get_and_clear_pending_events(&self) -> Vec { let mut pending_events = Vec::new(); for chan in self.monitors.lock().unwrap().values_mut() { pending_events.append(&mut chan.get_and_clear_pending_events()); @@ -795,7 +797,7 @@ pub struct ChannelMonitor { payment_preimages: HashMap, pending_monitor_events: Vec, - pending_events: Vec, + pending_events: Vec, // Used to track onchain events, i.e transactions parts of channels confirmed on chain, on which // we have to take actions once they reach enough confs. Key is a block height timer, i.e we enforce @@ -946,7 +948,7 @@ impl ChannelMonitor { /// the "reorg path" (ie disconnecting blocks until you find a common ancestor from both the /// returned block hash and the the current chain and then reconnecting blocks to get to the /// best chain) upon deserializing the object! - pub fn write_for_disk(&self, writer: &mut W) -> Result<(), ::std::io::Error> { + pub fn write_for_disk(&self, writer: &mut W) -> Result<(), Error> { //TODO: We still write out all the serialization here manually instead of using the fancy //serialization framework we have, we should migrate things over to it. writer.write_all(&[SERIALIZATION_VERSION; 1])?; @@ -1452,7 +1454,7 @@ impl ChannelMonitor { /// This is called by ManyChannelMonitor::get_and_clear_pending_events() and is equivalent to /// EventsProvider::get_and_clear_pending_events() except that it requires &mut self as we do /// no internal locking in ChannelMonitors. 
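The ChannelManagerReadArgs::new helper introduced above builds the funding-outpoint-keyed monitor map from a Vec of &mut ChannelMonitor, which is exactly what the fuzz target and functional tests switch to elsewhere in this patch. A wiring sketch under the assumption that the keys_manager, fee_estimator, monitor, tx_broadcaster and logger handles plus the previously-read monitors already exist; the deserialization step itself is left as a comment because the full ChannelManager generic parameters depend on those handles:

use lightning::ln::channelmanager::ChannelManagerReadArgs;
use lightning::util::config::UserConfig;

// `monitors` is a Vec<&mut ChannelMonitor<ChanSigner>> read earlier with
// ChannelMonitor::read; the helper keys them by funding outpoint internally,
// replacing the old hand-built `&mut HashMap` argument.
let read_args = ChannelManagerReadArgs::new(
    keys_manager, fee_estimator, monitor, tx_broadcaster, logger,
    UserConfig::default(), monitors,
);
// let (_block_hash, channel_manager) =
//     <(BlockHash, ChannelManager<..>)>::read(&mut Cursor::new(&serialized[..]), read_args)?;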
- pub fn get_and_clear_pending_events(&mut self) -> Vec { + pub fn get_and_clear_pending_events(&mut self) -> Vec { let mut ret = Vec::new(); mem::swap(&mut ret, &mut self.pending_events); ret @@ -1957,7 +1959,7 @@ impl ChannelMonitor { }, OnchainEvent::MaturingOutput { descriptor } => { log_trace!(logger, "Descriptor {} has got enough confirmations to be passed upstream", log_spendable!(descriptor)); - self.pending_events.push(events::Event::SpendableOutputs { + self.pending_events.push(Event::SpendableOutputs { outputs: vec![descriptor] }); } @@ -2199,16 +2201,30 @@ impl ChannelMonitor { fn is_paying_spendable_output(&mut self, tx: &Transaction, height: u32, logger: &L) where L::Target: Logger { let mut spendable_output = None; for (i, outp) in tx.output.iter().enumerate() { // There is max one spendable output for any channel tx, including ones generated by us + if i > ::std::u16::MAX as usize { + // While it is possible that an output exists on chain which is greater than the + // 2^16th output in a given transaction, this is only possible if the output is not + // in a lightning transaction and was instead placed there by some third party who + // wishes to give us money for no reason. + // Namely, any lightning transactions which we pre-sign will never have anywhere + // near 2^16 outputs both because such transactions must have ~2^16 outputs who's + // scripts are not longer than one byte in length and because they are inherently + // non-standard due to their size. + // Thus, it is completely safe to ignore such outputs, and while it may result in + // us ignoring non-lightning fund to us, that is only possible if someone fills + // nearly a full block with garbage just to hit this case. + continue; + } if outp.script_pubkey == self.destination_script { spendable_output = Some(SpendableOutputDescriptor::StaticOutput { - outpoint: BitcoinOutPoint { txid: tx.txid(), vout: i as u32 }, + outpoint: OutPoint { txid: tx.txid(), index: i as u16 }, output: outp.clone(), }); break; } else if let Some(ref broadcasted_local_revokable_script) = self.broadcasted_local_revokable_script { if broadcasted_local_revokable_script.0 == outp.script_pubkey { spendable_output = Some(SpendableOutputDescriptor::DynamicOutputP2WSH { - outpoint: BitcoinOutPoint { txid: tx.txid(), vout: i as u32 }, + outpoint: OutPoint { txid: tx.txid(), index: i as u16 }, per_commitment_point: broadcasted_local_revokable_script.1, to_self_delay: self.on_local_tx_csv, output: outp.clone(), @@ -2219,14 +2235,14 @@ impl ChannelMonitor { } } else if self.remote_payment_script == outp.script_pubkey { spendable_output = Some(SpendableOutputDescriptor::StaticOutputRemotePayment { - outpoint: BitcoinOutPoint { txid: tx.txid(), vout: i as u32 }, + outpoint: OutPoint { txid: tx.txid(), index: i as u16 }, output: outp.clone(), key_derivation_params: self.keys.key_derivation_params(), }); break; } else if outp.script_pubkey == self.shutdown_script { spendable_output = Some(SpendableOutputDescriptor::StaticOutput { - outpoint: BitcoinOutPoint { txid: tx.txid(), vout: i as u32 }, + outpoint: OutPoint { txid: tx.txid(), index: i as u16 }, output: outp.clone(), }); } @@ -2437,7 +2453,7 @@ impl Readable for (BlockHash, ChannelMonitor } let pending_events_len: u64 = Readable::read(reader)?; - let mut pending_events = Vec::with_capacity(cmp::min(pending_events_len as usize, MAX_ALLOC_SIZE / mem::size_of::())); + let mut pending_events = Vec::with_capacity(cmp::min(pending_events_len as usize, MAX_ALLOC_SIZE / mem::size_of::())); for _ in 
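channelmonitor.rs above moves SpendableOutputDescriptor from bitcoin's OutPoint (u32 vout) to the crate's chain::transaction::OutPoint (u16 index), which is why any output index beyond 2^16 can simply be skipped. Consumers spending such an output convert back with into_bitcoin_outpoint(), as the functional tests below do. A small sketch of that round trip; the all-zero txid is a placeholder:

use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
use bitcoin::hash_types::Txid;
use bitcoin::hashes::Hash;
use lightning::chain::transaction::OutPoint;

fn to_previous_output() -> BitcoinOutPoint {
    // Placeholder txid; a real caller uses the spendable output's transaction id.
    let txid = Txid::from_slice(&[0u8; 32]).unwrap();
    // Pre-signed lightning transactions never approach 2^16 outputs, so a u16
    // index is sufficient on the lightning side.
    let ln_outpoint = OutPoint { txid, index: 0 };
    // Widen back to the u32 vout that bitcoin's OutPoint (and TxIn) expects.
    ln_outpoint.into_bitcoin_outpoint()
}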
0..pending_events_len { if let Some(event) = MaybeReadable::read(reader)? { pending_events.push(event); diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 90421209..b3d5e8c9 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -169,7 +169,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { monitor: self.chan_monitor, tx_broadcaster: self.tx_broadcaster.clone(), logger: &test_utils::TestLogger::new(), - channel_monitors: &mut channel_monitors, + channel_monitors, }).unwrap(); } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 4b111f2a..ef5b0e0c 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -1636,7 +1636,7 @@ fn test_fee_spike_violation_fails_htlc() { // Assemble the set of keys we can use for signatures for our commitment_signed message. let commitment_secret = SecretKey::from_slice(&remote_secret1).unwrap(); let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &commitment_secret); - let commit_tx_keys = chan_utils::TxCreationKeys::new(&secp_ctx, &per_commitment_point, &remote_delayed_payment_basepoint, + let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, &remote_delayed_payment_basepoint, &remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint).unwrap(); // Build the remote commitment transaction so we can sign it, and then later use the @@ -3625,7 +3625,9 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8) { let logger = test_utils::TestLogger::new(); let payment_event = { let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels()), &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); + let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), + &nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels().iter().collect::>()), + &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); nodes[0].node.send_payment(&route, payment_hash_1, &None).unwrap(); check_added_monitors!(nodes[0], 1); @@ -3800,7 +3802,9 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8) { // Channel should still work fine... 
let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels()), &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); + let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), + &nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels().iter().collect::>()), + &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0; claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000); } @@ -4314,7 +4318,7 @@ fn test_no_txn_manager_serialize_deserialize() { monitor: nodes[0].chan_monitor, tx_broadcaster: nodes[0].tx_broadcaster.clone(), logger: &logger, - channel_monitors: &mut channel_monitors, + channel_monitors, }).unwrap() }; nodes_0_deserialized = nodes_0_deserialized_tmp; @@ -4422,7 +4426,7 @@ fn test_manager_serialize_deserialize_events() { monitor: nodes[0].chan_monitor, tx_broadcaster: nodes[0].tx_broadcaster.clone(), logger: &logger, - channel_monitors: &mut channel_monitors, + channel_monitors, }).unwrap() }; nodes_0_deserialized = nodes_0_deserialized_tmp; @@ -4512,7 +4516,7 @@ fn test_simple_manager_serialize_deserialize() { monitor: nodes[0].chan_monitor, tx_broadcaster: nodes[0].tx_broadcaster.clone(), logger: &logger, - channel_monitors: &mut channel_monitors, + channel_monitors, }).unwrap() }; nodes_0_deserialized = nodes_0_deserialized_tmp; @@ -4602,7 +4606,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { monitor: nodes[0].chan_monitor, tx_broadcaster: nodes[0].tx_broadcaster.clone(), logger: &logger, - channel_monitors: &mut node_0_stale_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(), + channel_monitors: node_0_stale_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(), }) { } else { panic!("If the monitor(s) are stale, this indicates a bug and we should get an Err return"); }; @@ -4616,7 +4620,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { monitor: nodes[0].chan_monitor, tx_broadcaster: nodes[0].tx_broadcaster.clone(), logger: &logger, - channel_monitors: &mut node_0_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(), + channel_monitors: node_0_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(), }).unwrap(); nodes_0_deserialized = nodes_0_deserialized_tmp; assert!(nodes_0_read.is_empty()); @@ -4668,7 +4672,7 @@ macro_rules! check_spendable_outputs { match *outp { SpendableOutputDescriptor::StaticOutputRemotePayment { ref outpoint, ref output, ref key_derivation_params } => { let input = TxIn { - previous_output: outpoint.clone(), + previous_output: outpoint.into_bitcoin_outpoint(), script_sig: Script::new(), sequence: 0, witness: Vec::new(), @@ -4696,7 +4700,7 @@ macro_rules! check_spendable_outputs { }, SpendableOutputDescriptor::DynamicOutputP2WSH { ref outpoint, ref per_commitment_point, ref to_self_delay, ref output, ref key_derivation_params, ref remote_revocation_pubkey } => { let input = TxIn { - previous_output: outpoint.clone(), + previous_output: outpoint.into_bitcoin_outpoint(), script_sig: Script::new(), sequence: *to_self_delay as u32, witness: Vec::new(), @@ -4729,7 +4733,7 @@ macro_rules! 
check_spendable_outputs { SpendableOutputDescriptor::StaticOutput { ref outpoint, ref output } => { let secp_ctx = Secp256k1::new(); let input = TxIn { - previous_output: outpoint.clone(), + previous_output: outpoint.into_bitcoin_outpoint(), script_sig: Script::new(), sequence: 0, witness: Vec::new(), @@ -7887,7 +7891,7 @@ fn test_data_loss_protect() { logger: &logger, tx_broadcaster: &tx_broadcaster, default_config: UserConfig::default(), - channel_monitors: &mut channel_monitors, + channel_monitors, }).unwrap().1 }; nodes[0].node = &node_state_0; diff --git a/lightning/src/ln/onchaintx.rs b/lightning/src/ln/onchaintx.rs index e21e8fb2..37c7f396 100644 --- a/lightning/src/ln/onchaintx.rs +++ b/lightning/src/ln/onchaintx.rs @@ -583,7 +583,7 @@ impl OnchainTxHandler { for (i, (outp, per_outp_material)) in cached_claim_datas.per_input_material.iter().enumerate() { match per_outp_material { &InputMaterial::Revoked { ref per_commitment_point, ref remote_delayed_payment_base_key, ref remote_htlc_base_key, ref per_commitment_key, ref input_descriptor, ref amount, ref htlc, ref on_remote_tx_csv } => { - if let Ok(chan_keys) = TxCreationKeys::new(&self.secp_ctx, &per_commitment_point, remote_delayed_payment_base_key, remote_htlc_base_key, &self.key_storage.pubkeys().revocation_basepoint, &self.key_storage.pubkeys().htlc_basepoint) { + if let Ok(chan_keys) = TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, remote_delayed_payment_base_key, remote_htlc_base_key, &self.key_storage.pubkeys().revocation_basepoint, &self.key_storage.pubkeys().htlc_basepoint) { let witness_script = if let Some(ref htlc) = *htlc { chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, &chan_keys.a_htlc_key, &chan_keys.b_htlc_key, &chan_keys.revocation_key) @@ -607,7 +607,7 @@ impl OnchainTxHandler { } }, &InputMaterial::RemoteHTLC { ref per_commitment_point, ref remote_delayed_payment_base_key, ref remote_htlc_base_key, ref preimage, ref htlc } => { - if let Ok(chan_keys) = TxCreationKeys::new(&self.secp_ctx, &per_commitment_point, remote_delayed_payment_base_key, remote_htlc_base_key, &self.key_storage.pubkeys().revocation_basepoint, &self.key_storage.pubkeys().htlc_basepoint) { + if let Ok(chan_keys) = TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, remote_delayed_payment_base_key, remote_htlc_base_key, &self.key_storage.pubkeys().revocation_basepoint, &self.key_storage.pubkeys().htlc_basepoint) { let witness_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, &chan_keys.a_htlc_key, &chan_keys.b_htlc_key, &chan_keys.revocation_key); if !preimage.is_some() { bumped_tx.lock_time = htlc.cltv_expiry }; // Right now we don't aggregate time-locked transaction, if we do we should set lock_time before to avoid breaking hash computation diff --git a/lightning/src/routing/network_graph.rs b/lightning/src/routing/network_graph.rs index 89c19b84..c46781d1 100644 --- a/lightning/src/routing/network_graph.rs +++ b/lightning/src/routing/network_graph.rs @@ -20,19 +20,32 @@ use bitcoin::blockdata::opcodes; use chain::chaininterface::{ChainError, ChainWatchInterface}; use ln::features::{ChannelFeatures, NodeFeatures}; -use ln::msgs::{DecodeError, ErrorAction, LightningError, RoutingMessageHandler, NetAddress, OptionalField, MAX_VALUE_MSAT}; +use ln::msgs::{DecodeError, ErrorAction, LightningError, RoutingMessageHandler, NetAddress, MAX_VALUE_MSAT}; +use ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, OptionalField}; use ln::msgs; use util::ser::{Writeable, 
Readable, Writer}; use util::logger::Logger; use std::{cmp, fmt}; -use std::sync::RwLock; +use std::sync::{RwLock, RwLockReadGuard}; use std::sync::atomic::{AtomicUsize, Ordering}; use std::collections::BTreeMap; use std::collections::btree_map::Entry as BtreeEntry; use std::ops::Deref; use bitcoin::hashes::hex::ToHex; +/// Represents the network as nodes and channels between them +#[derive(PartialEq)] +pub struct NetworkGraph { + channels: BTreeMap, + nodes: BTreeMap, +} + +/// A simple newtype for RwLockReadGuard<'a, NetworkGraph>. +/// This exists only to make accessing a RwLock possible from +/// the C bindings, as it can be done directly in Rust code. +pub struct LockedNetworkGraph<'a>(pub RwLockReadGuard<'a, NetworkGraph>); + /// Receives and validates network updates from peers, /// stores authentic and relevant data as a network graph. /// This network graph is then used for routing payments. @@ -77,6 +90,21 @@ impl NetGraphMsgHandler where C::Target: ChainWatchInt logger, } } + + /// Take a read lock on the network_graph and return it in the C-bindings + /// newtype helper. This is likely only useful when called via the C + /// bindings as you can call `self.network_graph.read().unwrap()` in Rust + /// yourself. + pub fn read_locked_graph<'a>(&'a self) -> LockedNetworkGraph<'a> { + LockedNetworkGraph(self.network_graph.read().unwrap()) + } +} + +impl<'a> LockedNetworkGraph<'a> { + /// Get a reference to the NetworkGraph which this read-lock contains. + pub fn graph(&self) -> &NetworkGraph { + &*self.0 + } } @@ -147,7 +175,7 @@ impl RoutingMessageHandler for N self.network_graph.write().unwrap().update_channel(msg, Some(&self.secp_ctx)) } - fn get_next_channel_announcements(&self, starting_point: u64, batch_amount: u8) -> Vec<(msgs::ChannelAnnouncement, Option, Option)> { + fn get_next_channel_announcements(&self, starting_point: u64, batch_amount: u8) -> Vec<(ChannelAnnouncement, Option, Option)> { let network_graph = self.network_graph.read().unwrap(); let mut result = Vec::with_capacity(batch_amount as usize); let mut iter = network_graph.get_channels().range(starting_point..); @@ -175,7 +203,7 @@ impl RoutingMessageHandler for N result } - fn get_next_node_announcements(&self, starting_point: Option<&PublicKey>, batch_amount: u8) -> Vec { + fn get_next_node_announcements(&self, starting_point: Option<&PublicKey>, batch_amount: u8) -> Vec { let network_graph = self.network_graph.read().unwrap(); let mut result = Vec::with_capacity(batch_amount as usize); let mut iter = if let Some(pubkey) = starting_point { @@ -443,13 +471,6 @@ impl Readable for NodeInfo { } } -/// Represents the network as nodes and channels between them -#[derive(PartialEq)] -pub struct NetworkGraph { - channels: BTreeMap, - nodes: BTreeMap, -} - impl Writeable for NetworkGraph { fn write(&self, writer: &mut W) -> Result<(), ::std::io::Error> { (self.channels.len() as u64).write(writer)?; diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index 7730fd8f..23232a0d 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -14,7 +14,7 @@ use bitcoin::secp256k1::key::PublicKey; -use ln::channelmanager; +use ln::channelmanager::ChannelDetails; use ln::features::{ChannelFeatures, NodeFeatures}; use ln::msgs::{DecodeError, ErrorAction, LightningError, MAX_VALUE_MSAT}; use routing::network_graph::{NetworkGraph, RoutingFees}; @@ -169,8 +169,8 @@ struct DummyDirectionalChannelInfo { /// The fees on channels from us to next-hops are ignored (as they are assumed to 
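read_locked_graph and the LockedNetworkGraph newtype above exist, as their docs say, only so the C bindings can hold the RwLock read guard behind a concrete type; from Rust the two forms below are interchangeable. A brief sketch, assuming a NetGraphMsgHandler named net_graph_msg_handler is already constructed and that get_channels()/get_nodes() are the existing public accessors on NetworkGraph:

// Plain Rust: take the read lock on the graph directly.
let guard = net_graph_msg_handler.network_graph.read().unwrap();
let _channels = guard.get_channels().len();
drop(guard);

// Bindings-friendly form added here: the newtype wraps the same read guard.
let locked = net_graph_msg_handler.read_locked_graph();
let graph = locked.graph();
let _nodes = graph.get_nodes().len();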
all be /// equal), however the enabled/disabled bit on such channels as well as the htlc_minimum_msat /// *is* checked as they may change based on the receiving node. -pub fn get_route(our_node_id: &PublicKey, network: &NetworkGraph, target: &PublicKey, first_hops: Option<&[channelmanager::ChannelDetails]>, - last_hops: &[RouteHint], final_value_msat: u64, final_cltv: u32, logger: L) -> Result where L::Target: Logger { +pub fn get_route(our_node_id: &PublicKey, network: &NetworkGraph, target: &PublicKey, first_hops: Option<&[&ChannelDetails]>, + last_hops: &[&RouteHint], final_value_msat: u64, final_cltv: u32, logger: L) -> Result where L::Target: Logger { // TODO: Obviously *only* using total fee cost sucks. We should consider weighting by // uptime/success in using a node in the past. if *target == *our_node_id { @@ -907,7 +907,7 @@ mod tests { inbound_capacity_msat: 0, is_live: true, }]; - let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], Some(&our_chans), &Vec::new(), 100, 42, Arc::clone(&logger)).unwrap(); + let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], Some(&our_chans.iter().collect::>()), &Vec::new(), 100, 42, Arc::clone(&logger)).unwrap(); assert_eq!(route.paths[0].len(), 2); assert_eq!(route.paths[0][0].pubkey, nodes[7]); @@ -954,7 +954,7 @@ mod tests { inbound_capacity_msat: 0, is_live: true, }]; - let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], Some(&our_chans), &Vec::new(), 100, 42, Arc::clone(&logger)).unwrap(); + let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], Some(&our_chans.iter().collect::>()), &Vec::new(), 100, 42, Arc::clone(&logger)).unwrap(); assert_eq!(route.paths[0].len(), 2); assert_eq!(route.paths[0][0].pubkey, nodes[7]); @@ -1018,7 +1018,7 @@ mod tests { inbound_capacity_msat: 0, is_live: true, }]; - let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], Some(&our_chans), &Vec::new(), 100, 42, Arc::clone(&logger)).unwrap(); + let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], Some(&our_chans.iter().collect::>()), &Vec::new(), 100, 42, Arc::clone(&logger)).unwrap(); assert_eq!(route.paths[0].len(), 2); assert_eq!(route.paths[0][0].pubkey, nodes[7]); @@ -1071,7 +1071,7 @@ mod tests { let (_, our_id, _, nodes) = get_nodes(&secp_ctx); // Simple test across 2, 3, 5, and 4 via a last_hop channel - let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[6], None, &last_hops(&nodes), 100, 42, Arc::clone(&logger)).unwrap(); + let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[6], None, &last_hops(&nodes).iter().collect::>(), 100, 42, Arc::clone(&logger)).unwrap(); assert_eq!(route.paths[0].len(), 5); assert_eq!(route.paths[0][0].pubkey, nodes[1]); @@ -1130,7 +1130,7 @@ mod tests { is_live: true, }]; let mut last_hops = last_hops(&nodes); - let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[6], Some(&our_chans), &last_hops, 100, 42, Arc::clone(&logger)).unwrap(); + let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[6], Some(&our_chans.iter().collect::>()), &last_hops.iter().collect::>(), 100, 42, Arc::clone(&logger)).unwrap(); assert_eq!(route.paths[0].len(), 2); assert_eq!(route.paths[0][0].pubkey, nodes[3]); @@ -1150,7 +1150,7 @@ mod tests { 
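get_route now takes Option<&[&ChannelDetails]> and &[&RouteHint], and the test updates above simply pass Some(&our_chans.iter().collect::<Vec<_>>()): a &Vec<&T> coerces to &[&T] at the call site, so no explicit as_slice() is needed. A self-contained illustration of that coercion using plain types rather than the lightning structs:

fn takes_ref_slices(first_hops: Option<&[&u32]>, last_hops: &[&u32]) -> usize {
    first_hops.map_or(0, |s| s.len()) + last_hops.len()
}

fn main() {
    let chans: Vec<u32> = vec![1, 2, 3];
    let hints: Vec<u32> = vec![4, 5];
    // &Vec<&u32> coerces to &[&u32], mirroring the updated get_route call sites.
    let n = takes_ref_slices(
        Some(&chans.iter().collect::<Vec<_>>()),
        &hints.iter().collect::<Vec<_>>(),
    );
    assert_eq!(n, 5);
}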
last_hops[0].fees.base_msat = 1000; // Revert to via 6 as the fee on 8 goes up - let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[6], None, &last_hops, 100, 42, Arc::clone(&logger)).unwrap(); + let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[6], None, &last_hops.iter().collect::>(), 100, 42, Arc::clone(&logger)).unwrap(); assert_eq!(route.paths[0].len(), 4); assert_eq!(route.paths[0][0].pubkey, nodes[1]); @@ -1184,7 +1184,7 @@ mod tests { assert_eq!(route.paths[0][3].channel_features.le_flags(), &Vec::::new()); // We can't learn any flags from invoices, sadly // ...but still use 8 for larger payments as 6 has a variable feerate - let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[6], None, &last_hops, 2000, 42, Arc::clone(&logger)).unwrap(); + let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[6], None, &last_hops.iter().collect::>(), 2000, 42, Arc::clone(&logger)).unwrap(); assert_eq!(route.paths[0].len(), 5); assert_eq!(route.paths[0][0].pubkey, nodes[1]); diff --git a/lightning/src/util/config.rs b/lightning/src/util/config.rs index c3726365..712b5937 100644 --- a/lightning/src/util/config.rs +++ b/lightning/src/util/config.rs @@ -12,30 +12,6 @@ use ln::channelmanager::{BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT}; -/// Top-level config which holds ChannelHandshakeLimits and ChannelConfig. -/// -/// Default::default() provides sane defaults for most configurations -/// (but currently with 0 relay fees!) -#[derive(Clone, Debug)] -pub struct UserConfig { - /// Channel config that we propose to our counterparty. - pub own_channel_config: ChannelHandshakeConfig, - /// Limits applied to our counterparty's proposed channel config settings. - pub peer_channel_config_limits: ChannelHandshakeLimits, - /// Channel config which affects behavior during channel lifetime. - pub channel_options: ChannelConfig, -} - -impl Default for UserConfig { - fn default() -> Self { - UserConfig { - own_channel_config: ChannelHandshakeConfig::default(), - peer_channel_config_limits: ChannelHandshakeLimits::default(), - channel_options: ChannelConfig::default(), - } - } -} - /// Configuration we set when applicable. /// /// Default::default() provides sane defaults. @@ -228,3 +204,27 @@ impl_writeable!(ChannelConfig, 8+1+1, { announced_channel, commit_upfront_shutdown_pubkey }); + +/// Top-level config which holds ChannelHandshakeLimits and ChannelConfig. +/// +/// Default::default() provides sane defaults for most configurations +/// (but currently with 0 relay fees!) +#[derive(Clone, Debug)] +pub struct UserConfig { + /// Channel config that we propose to our counterparty. + pub own_channel_config: ChannelHandshakeConfig, + /// Limits applied to our counterparty's proposed channel config settings. + pub peer_channel_config_limits: ChannelHandshakeLimits, + /// Channel config which affects behavior during channel lifetime. 
+ pub channel_options: ChannelConfig, +} + +impl Default for UserConfig { + fn default() -> Self { + UserConfig { + own_channel_config: ChannelHandshakeConfig::default(), + peer_channel_config_limits: ChannelHandshakeLimits::default(), + channel_options: ChannelConfig::default(), + } + } +} diff --git a/lightning/src/util/enforcing_trait_impls.rs b/lightning/src/util/enforcing_trait_impls.rs index 0d20f7ad..2127cc1a 100644 --- a/lightning/src/util/enforcing_trait_impls.rs +++ b/lightning/src/util/enforcing_trait_impls.rs @@ -46,12 +46,12 @@ impl EnforcingChannelKeys { keys: &TxCreationKeys) { let remote_points = self.inner.remote_pubkeys(); - let keys_expected = TxCreationKeys::new(secp_ctx, - &keys.per_commitment_point, - &remote_points.delayed_payment_basepoint, - &remote_points.htlc_basepoint, - &self.inner.pubkeys().revocation_basepoint, - &self.inner.pubkeys().htlc_basepoint).unwrap(); + let keys_expected = TxCreationKeys::derive_new(secp_ctx, + &keys.per_commitment_point, + &remote_points.delayed_payment_basepoint, + &remote_points.htlc_basepoint, + &self.inner.pubkeys().revocation_basepoint, + &self.inner.pubkeys().htlc_basepoint).unwrap(); if keys != &keys_expected { panic!("derived different per-tx keys") } } } diff --git a/lightning/src/util/macro_logger.rs b/lightning/src/util/macro_logger.rs index 85484ace..e565c19c 100644 --- a/lightning/src/util/macro_logger.rs +++ b/lightning/src/util/macro_logger.rs @@ -136,13 +136,13 @@ impl<'a> std::fmt::Display for DebugSpendable<'a> { fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> { match self.0 { &SpendableOutputDescriptor::StaticOutput { ref outpoint, .. } => { - write!(f, "StaticOutput {}:{} marked for spending", outpoint.txid, outpoint.vout)?; + write!(f, "StaticOutput {}:{} marked for spending", outpoint.txid, outpoint.index)?; } &SpendableOutputDescriptor::DynamicOutputP2WSH { ref outpoint, .. } => { - write!(f, "DynamicOutputP2WSH {}:{} marked for spending", outpoint.txid, outpoint.vout)?; + write!(f, "DynamicOutputP2WSH {}:{} marked for spending", outpoint.txid, outpoint.index)?; } &SpendableOutputDescriptor::StaticOutputRemotePayment { ref outpoint, .. } => { - write!(f, "DynamicOutputP2WPKH {}:{} marked for spending", outpoint.txid, outpoint.vout)?; + write!(f, "DynamicOutputP2WPKH {}:{} marked for spending", outpoint.txid, outpoint.index)?; } } Ok(())
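config.rs above only moves UserConfig below the member types it contains (presumably an ordering requirement for the C bindings generator); the struct and its Default impl are unchanged, so existing construction patterns keep working. A small sketch starting from the documented defaults (note the 0 relay fees) and overriding one channel option:

use lightning::util::config::UserConfig;

fn announced_channel_config() -> UserConfig {
    let mut config = UserConfig::default();
    // announced_channel lives on ChannelConfig, per the impl_writeable! list above.
    config.channel_options.announced_channel = true;
    config
}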