Merge pull request #676 from TheBlueMatt/2020-08-c-bindings-cleanups-3
author     Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
           Wed, 26 Aug 2020 15:14:34 +0000 (08:14 -0700)
committer  GitHub <noreply@github.com>
           Wed, 26 Aug 2020 15:14:34 +0000 (08:14 -0700)
Pre-C-Bindings Cleanups #3

16 files changed:
fuzz/src/chanmon_consistency.rs
fuzz/src/router.rs
lightning/src/chain/chaininterface.rs
lightning/src/chain/keysinterface.rs
lightning/src/ln/chan_utils.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/channelmonitor.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/onchaintx.rs
lightning/src/routing/network_graph.rs
lightning/src/routing/router.rs
lightning/src/util/config.rs
lightning/src/util/enforcing_trait_impls.rs
lightning/src/util/macro_logger.rs

fuzz/src/chanmon_consistency.rs
index 65f721e484032c1965aea8c2a54141ec63e5e063..3191eb27308767c97a9d9e770873dbfd154618dc 100644 (file)
@@ -235,7 +235,7 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                                tx_broadcaster: broadcast.clone(),
                                logger,
                                default_config: config,
-                               channel_monitors: &mut monitor_refs,
+                               channel_monitors: monitor_refs,
                        };
 
                        (<(BlockHash, ChannelManager<EnforcingChannelKeys, Arc<TestChannelMonitor>, Arc<TestBroadcaster>, Arc<KeyProvider>, Arc<FuzzEstimator>, Arc<dyn Logger>>)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, monitor)
fuzz/src/router.rs
index 38e29de516ffb4b2572d462a191f728fcf4b054d..ffc4608376b71f9064bc4ad389100dcf326dc2b3 100644 (file)
@@ -236,7 +236,10 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                                        }
                                        &last_hops_vec[..]
                                };
-                               let _ = get_route(&our_pubkey, &net_graph_msg_handler.network_graph.read().unwrap(), &target, first_hops, last_hops, slice_to_be64(get_slice!(8)), slice_to_be32(get_slice!(4)), Arc::clone(&logger));
+                               let _ = get_route(&our_pubkey, &net_graph_msg_handler.network_graph.read().unwrap(), &target,
+                                       first_hops.map(|c| c.iter().collect::<Vec<_>>()).as_ref().map(|a| a.as_slice()),
+                                       &last_hops.iter().collect::<Vec<_>>(),
+                                       slice_to_be64(get_slice!(8)), slice_to_be32(get_slice!(4)), Arc::clone(&logger));
                        },
                        _ => return,
                }
lightning/src/chain/chaininterface.rs
index 644c3214aca9f464d4a99a628ea6cba072c2f0ba..fe3914545c6e05ac17bf012dd8598041c327b6f4 100644 (file)
@@ -245,13 +245,15 @@ pub type BlockNotifierRef<'a, C> = BlockNotifier<'a, &'a ChainListener, C>;
 /// or a BlockNotifierRef for conciseness. See their documentation for more details, but essentially
 /// you should default to using a BlockNotifierRef, and use a BlockNotifierArc instead when you
 /// require ChainListeners with static lifetimes, such as when you're using lightning-net-tokio.
-pub struct BlockNotifier<'a, CL: Deref<Target = ChainListener + 'a> + 'a, C: Deref> where C::Target: ChainWatchInterface {
+pub struct BlockNotifier<'a, CL: Deref + 'a, C: Deref>
+               where CL::Target: ChainListener + 'a, C::Target: ChainWatchInterface {
        listeners: Mutex<Vec<CL>>,
        chain_monitor: C,
        phantom: PhantomData<&'a ()>,
 }
 
-impl<'a, CL: Deref<Target = ChainListener + 'a> + 'a, C: Deref> BlockNotifier<'a, CL, C> where C::Target: ChainWatchInterface {
+impl<'a, CL: Deref + 'a, C: Deref> BlockNotifier<'a, CL, C>
+               where CL::Target: ChainListener + 'a, C::Target: ChainWatchInterface {
        /// Constructs a new BlockNotifier without any listeners.
        pub fn new(chain_monitor: C) -> BlockNotifier<'a, CL, C> {
                BlockNotifier {
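
The hunk above loosens BlockNotifier's listener bound. A standalone sketch of the difference (the traits and struct bodies here are illustrative stand-ins, not the real chaininterface types): the old `Deref<Target = ChainListener + 'a>` bound only accepts pointers to the trait object itself, while the new where-clause accepts any Deref whose target implements ChainListener, e.g. a plain `&MyListener`.

use std::marker::PhantomData;
use std::ops::Deref;
use std::sync::Mutex;

trait ChainListener { fn block_connected(&self); }
trait ChainWatchInterface {}

// Old shape: CL must deref to exactly the trait object `dyn ChainListener`,
// so `&MyListener` (which derefs to the concrete MyListener) is rejected.
struct OldNotifier<'a, CL: Deref<Target = dyn ChainListener + 'a> + 'a, C: Deref>
        where C::Target: ChainWatchInterface {
    listeners: Mutex<Vec<CL>>,
    chain_monitor: C,
    phantom: PhantomData<&'a ()>,
}

// New shape: any Deref whose target implements ChainListener is accepted,
// which concrete (non-trait-object) listener types can satisfy directly.
struct NewNotifier<'a, CL: Deref + 'a, C: Deref>
        where CL::Target: ChainListener + 'a, C::Target: ChainWatchInterface {
    listeners: Mutex<Vec<CL>>,
    chain_monitor: C,
    phantom: PhantomData<&'a ()>,
}
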
lightning/src/chain/keysinterface.rs
index e564fa7ca186e5fae392404bd7a5b00dd3b7477c..5571c8b45cd351e600b6a8837d3f9d7fc7f0ed82 100644 (file)
@@ -11,7 +11,7 @@
 //! spendable on-chain outputs which the user owns and is responsible for using just as any other
 //! on-chain output which is theirs.
 
-use bitcoin::blockdata::transaction::{Transaction, OutPoint, TxOut};
+use bitcoin::blockdata::transaction::{Transaction, TxOut};
 use bitcoin::blockdata::script::{Script, Builder};
 use bitcoin::blockdata::opcodes;
 use bitcoin::network::constants::Network;
@@ -31,9 +31,10 @@ use bitcoin::secp256k1;
 use util::byte_utils;
 use util::ser::{Writeable, Writer, Readable};
 
+use chain::transaction::OutPoint;
 use ln::chan_utils;
 use ln::chan_utils::{HTLCOutputInCommitment, make_funding_redeemscript, ChannelPublicKeys, LocalCommitmentTransaction, PreCalculatedTxCreationKeys};
-use ln::msgs;
+use ln::msgs::UnsignedChannelAnnouncement;
 
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::io::Error;
@@ -316,7 +317,7 @@ pub trait ChannelKeys : Send+Clone {
        /// Note that if this fails or is rejected, the channel will not be publicly announced and
        /// our counterparty may (though likely will not) close the channel on us for violating the
        /// protocol.
-       fn sign_channel_announcement<T: secp256k1::Signing>(&self, msg: &msgs::UnsignedChannelAnnouncement, secp_ctx: &Secp256k1<T>) -> Result<Signature, ()>;
+       fn sign_channel_announcement<T: secp256k1::Signing>(&self, msg: &UnsignedChannelAnnouncement, secp_ctx: &Secp256k1<T>) -> Result<Signature, ()>;
 
        /// Set the remote channel basepoints and remote/local to_self_delay.
        /// This is done immediately on incoming channels and as soon as the channel is accepted on outgoing channels.
@@ -582,7 +583,7 @@ impl ChannelKeys for InMemoryChannelKeys {
                Ok(secp_ctx.sign(&sighash, &self.funding_key))
        }
 
-       fn sign_channel_announcement<T: secp256k1::Signing>(&self, msg: &msgs::UnsignedChannelAnnouncement, secp_ctx: &Secp256k1<T>) -> Result<Signature, ()> {
+       fn sign_channel_announcement<T: secp256k1::Signing>(&self, msg: &UnsignedChannelAnnouncement, secp_ctx: &Secp256k1<T>) -> Result<Signature, ()> {
                let msghash = hash_to_message!(&Sha256dHash::hash(&msg.encode()[..])[..]);
                Ok(secp_ctx.sign(&msghash, &self.funding_key))
        }
@@ -808,7 +809,7 @@ impl KeysInterface for KeysManager {
                self.shutdown_pubkey.clone()
        }
 
-       fn get_channel_keys(&self, _inbound: bool, channel_value_satoshis: u64) -> InMemoryChannelKeys {
+       fn get_channel_keys(&self, _inbound: bool, channel_value_satoshis: u64) -> Self::ChanKeySigner {
                let child_ix = self.channel_child_index.fetch_add(1, Ordering::AcqRel);
                let ix_and_nanos: u64 = (child_ix as u64) << 32 | (self.starting_time_nanos as u64);
                self.derive_channel_keys(channel_value_satoshis, ix_and_nanos, self.starting_time_secs)
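
The last hunk above changes the impl to spell its return type as the trait's associated type. A minimal standalone illustration of that spelling (trait and type names mirror the diff; the bodies are stand-ins):

// Stand-in trait and types; only the `-> Self::ChanKeySigner` spelling matters here.
trait KeysInterface {
    type ChanKeySigner;
    fn get_channel_keys(&self, inbound: bool, channel_value_satoshis: u64) -> Self::ChanKeySigner;
}

struct InMemoryChannelKeys { channel_value_satoshis: u64 }
struct KeysManager;

impl KeysInterface for KeysManager {
    type ChanKeySigner = InMemoryChannelKeys;
    // Returning Self::ChanKeySigner rather than the concrete InMemoryChannelKeys
    // keeps the impl textually aligned with the trait method, which is easier
    // for a mechanical bindings generator to map.
    fn get_channel_keys(&self, _inbound: bool, channel_value_satoshis: u64) -> Self::ChanKeySigner {
        InMemoryChannelKeys { channel_value_satoshis }
    }
}
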
lightning/src/ln/chan_utils.rs
index e315398d52397bff904c34ef0a6504382fb918c2..998ac9cdcde9f9555c728f679e94d58991d74f28 100644 (file)
@@ -30,6 +30,7 @@ use util::byte_utils;
 
 use bitcoin::secp256k1::key::{SecretKey, PublicKey};
 use bitcoin::secp256k1::{Secp256k1, Signature};
+use bitcoin::secp256k1::Error as SecpError;
 use bitcoin::secp256k1;
 
 use std::{cmp, mem};
@@ -357,7 +358,7 @@ impl_writeable!(ChannelPublicKeys, 33*5, {
 
 impl TxCreationKeys {
        /// Create a new TxCreationKeys from channel base points and the per-commitment point
-       pub fn new<T: secp256k1::Signing + secp256k1::Verification>(secp_ctx: &Secp256k1<T>, per_commitment_point: &PublicKey, a_delayed_payment_base: &PublicKey, a_htlc_base: &PublicKey, b_revocation_base: &PublicKey, b_htlc_base: &PublicKey) -> Result<TxCreationKeys, secp256k1::Error> {
+       pub fn derive_new<T: secp256k1::Signing + secp256k1::Verification>(secp_ctx: &Secp256k1<T>, per_commitment_point: &PublicKey, a_delayed_payment_base: &PublicKey, a_htlc_base: &PublicKey, b_revocation_base: &PublicKey, b_htlc_base: &PublicKey) -> Result<TxCreationKeys, SecpError> {
                Ok(TxCreationKeys {
                        per_commitment_point: per_commitment_point.clone(),
                        revocation_key: derive_public_revocation_key(&secp_ctx, &per_commitment_point, &b_revocation_base)?,
lightning/src/ln/channel.rs
index f7f138274331e1b167bb2158cd952c77e7d257c5..066f71c537876f382bf6372b5ba17139fd50a46a 100644 (file)
@@ -1126,7 +1126,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                let htlc_basepoint = &self.local_keys.pubkeys().htlc_basepoint;
                let their_pubkeys = self.their_pubkeys.as_ref().unwrap();
 
-               Ok(secp_check!(TxCreationKeys::new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &their_pubkeys.revocation_basepoint, &their_pubkeys.htlc_basepoint), "Local tx keys generation got bogus keys".to_owned()))
+               Ok(secp_check!(TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &their_pubkeys.revocation_basepoint, &their_pubkeys.htlc_basepoint), "Local tx keys generation got bogus keys".to_owned()))
        }
 
        #[inline]
@@ -1140,7 +1140,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                let htlc_basepoint = &self.local_keys.pubkeys().htlc_basepoint;
                let their_pubkeys = self.their_pubkeys.as_ref().unwrap();
 
-               Ok(secp_check!(TxCreationKeys::new(&self.secp_ctx, &self.their_cur_commitment_point.unwrap(), &their_pubkeys.delayed_payment_basepoint, &their_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint), "Remote tx keys generation got bogus keys".to_owned()))
+               Ok(secp_check!(TxCreationKeys::derive_new(&self.secp_ctx, &self.their_cur_commitment_point.unwrap(), &their_pubkeys.delayed_payment_basepoint, &their_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint), "Remote tx keys generation got bogus keys".to_owned()))
        }
 
        /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
@@ -4673,7 +4673,7 @@ mod tests {
                let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
                let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
                let htlc_basepoint = &chan.local_keys.pubkeys().htlc_basepoint;
-               let keys = TxCreationKeys::new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &their_pubkeys.revocation_basepoint, &their_pubkeys.htlc_basepoint).unwrap();
+               let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &their_pubkeys.revocation_basepoint, &their_pubkeys.htlc_basepoint).unwrap();
 
                chan.their_pubkeys = Some(their_pubkeys);
 
lightning/src/ln/channelmanager.rs
index fdbbb43da3dd104ee5bd6699937af92df6dc4f7d..6116a49eee6088cc716cd8c60551ad463700bb79 100644 (file)
@@ -42,10 +42,12 @@ use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpd
 use ln::features::{InitFeatures, NodeFeatures};
 use routing::router::{Route, RouteHop};
 use ln::msgs;
+use ln::msgs::NetAddress;
 use ln::onion_utils;
 use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, OptionalField};
 use chain::keysinterface::{ChannelKeys, KeysInterface, KeysManager, InMemoryChannelKeys};
 use util::config::UserConfig;
+use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
 use util::{byte_utils, events};
 use util::ser::{Readable, ReadableArgs, MaybeReadable, Writeable, Writer};
 use util::chacha20::{ChaCha20, ChaChaReader};
@@ -312,7 +314,7 @@ pub(super) struct ChannelHolder<ChanSigner: ChannelKeys> {
        claimable_htlcs: HashMap<(PaymentHash, Option<PaymentSecret>), Vec<ClaimableHTLC>>,
        /// Messages to send to peers - pushed to in the same lock that they are generated in (except
        /// for broadcast messages, where ordering isn't as strict).
-       pub(super) pending_msg_events: Vec<events::MessageSendEvent>,
+       pub(super) pending_msg_events: Vec<MessageSendEvent>,
 }
 
 /// State we hold per-peer. In the future we should put channels in here, but for now we only hold
@@ -1484,7 +1486,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
        // be absurd. We ensure this by checking that at least 500 (our stated public contract on when
        // broadcast_node_announcement panics) of the maximum-length addresses would fit in a 64KB
        // message...
-       const HALF_MESSAGE_IS_ADDRS: u32 = ::std::u16::MAX as u32 / (msgs::NetAddress::MAX_LEN as u32 + 1) / 2;
+       const HALF_MESSAGE_IS_ADDRS: u32 = ::std::u16::MAX as u32 / (NetAddress::MAX_LEN as u32 + 1) / 2;
        #[deny(const_err)]
        #[allow(dead_code)]
        // ...by failing to compile if the number of addresses that would be half of a message is
@@ -1504,7 +1506,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
        /// only Tor Onion addresses.
        ///
        /// Panics if addresses is absurdly large (more than 500).
-       pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], addresses: Vec<msgs::NetAddress>) {
+       pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], addresses: Vec<NetAddress>) {
                let _ = self.total_consistency_lock.read().unwrap();
 
                if addresses.len() > 500 {
@@ -3011,14 +3013,14 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
        }
 }
 
-impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> events::MessageSendEventsProvider for ChannelManager<ChanSigner, M, T, K, F, L>
+impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<ChanSigner, M, T, K, F, L>
        where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
         T::Target: BroadcasterInterface,
         K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
         F::Target: FeeEstimator,
                                L::Target: Logger,
 {
-       fn get_and_clear_pending_msg_events(&self) -> Vec<events::MessageSendEvent> {
+       fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
                //TODO: This behavior should be documented. It's non-intuitive that we query
                // ChannelMonitors when clearing other events.
                self.process_pending_monitor_events();
@@ -3030,14 +3032,14 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
        }
 }
 
-impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> events::EventsProvider for ChannelManager<ChanSigner, M, T, K, F, L>
+impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> EventsProvider for ChannelManager<ChanSigner, M, T, K, F, L>
        where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
         T::Target: BroadcasterInterface,
         K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
         F::Target: FeeEstimator,
                                L::Target: Logger,
 {
-       fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
+       fn get_and_clear_pending_events(&self) -> Vec<Event> {
                //TODO: This behavior should be documented. It's non-intuitive that we query
                // ChannelMonitors when clearing other events.
                self.process_pending_monitor_events();
@@ -3774,7 +3776,27 @@ pub struct ChannelManagerReadArgs<'a, ChanSigner: 'a + ChannelKeys, M: Deref, T:
        ///
        /// In such cases the latest local transactions will be sent to the tx_broadcaster included in
        /// this struct.
-       pub channel_monitors: &'a mut HashMap<OutPoint, &'a mut ChannelMonitor<ChanSigner>>,
+       pub channel_monitors: HashMap<OutPoint, &'a mut ChannelMonitor<ChanSigner>>,
+}
+
+impl<'a, ChanSigner: 'a + ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
+               ChannelManagerReadArgs<'a, ChanSigner, M, T, K, F, L>
+       where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
+               T::Target: BroadcasterInterface,
+               K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
+               F::Target: FeeEstimator,
+               L::Target: Logger,
+       {
+       /// Simple utility function to create a ChannelManagerReadArgs which creates the monitor
+       /// HashMap for you. This is primarily useful for C bindings where it is not practical to
+       /// populate a HashMap directly from C.
+       pub fn new(keys_manager: K, fee_estimator: F, monitor: M, tx_broadcaster: T, logger: L, default_config: UserConfig,
+                       mut channel_monitors: Vec<&'a mut ChannelMonitor<ChanSigner>>) -> Self {
+               Self {
+                       keys_manager, fee_estimator, monitor, tx_broadcaster, logger, default_config,
+                       channel_monitors: channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect()
+               }
+       }
 }
 
 // Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the
@@ -3801,7 +3823,7 @@ impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: De
         F::Target: FeeEstimator,
         L::Target: Logger,
 {
-       fn read<R: ::std::io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, ChanSigner, M, T, K, F, L>) -> Result<Self, DecodeError> {
+       fn read<R: ::std::io::Read>(reader: &mut R, mut args: ChannelManagerReadArgs<'a, ChanSigner, M, T, K, F, L>) -> Result<Self, DecodeError> {
                let _ver: u8 = Readable::read(reader)?;
                let min_ver: u8 = Readable::read(reader)?;
                if min_ver > SERIALIZATION_VERSION {
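
The new ChannelManagerReadArgs::new chiefly packages the drain/map/collect step that callers otherwise write by hand (compare the functional_tests hunks further down). A self-contained sketch of that step with stand-in types; the real get_funding_txo returns a tuple whose first element is the funding outpoint, hence the `.0`:

use std::collections::HashMap;

// Stand-in for ChannelMonitor; only the shape of get_funding_txo matters here.
struct Monitor { funding_txo: (u32, u16) }
impl Monitor {
    fn get_funding_txo(&self) -> ((u32, u16), ()) { (self.funding_txo, ()) }
}

fn main() {
    let mut a = Monitor { funding_txo: (1, 0) };
    let mut b = Monitor { funding_txo: (2, 1) };
    let mut channel_monitors: Vec<&mut Monitor> = vec![&mut a, &mut b];
    // Key each &mut monitor by its funding outpoint, as the new helper does.
    let monitor_map: HashMap<(u32, u16), &mut Monitor> = channel_monitors
        .drain(..)
        .map(|monitor| (monitor.get_funding_txo().0, monitor))
        .collect();
    assert_eq!(monitor_map.len(), 2);
}
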
lightning/src/ln/channelmonitor.rs
index 060e88faa209f7cbacec1ef1228d0d1e7d68bc1e..570982fb70c280de5637adedafccced787bcaacb 100644 (file)
@@ -47,11 +47,13 @@ use chain::keysinterface::{SpendableOutputDescriptor, ChannelKeys};
 use util::logger::Logger;
 use util::ser::{Readable, MaybeReadable, Writer, Writeable, U48};
 use util::{byte_utils, events};
+use util::events::Event;
 
 use std::collections::{HashMap, hash_map};
 use std::sync::Mutex;
 use std::{hash,cmp, mem};
 use std::ops::Deref;
+use std::io::Error;
 
 /// An update generated by the underlying Channel itself which contains some new information the
 /// ChannelMonitor should be made aware of.
@@ -317,7 +319,7 @@ impl<Key : Send + cmp::Eq + hash::Hash, ChanSigner: ChannelKeys, T: Deref, F: De
              L::Target: Logger,
         C::Target: ChainWatchInterface,
 {
-       fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
+       fn get_and_clear_pending_events(&self) -> Vec<Event> {
                let mut pending_events = Vec::new();
                for chan in self.monitors.lock().unwrap().values_mut() {
                        pending_events.append(&mut chan.get_and_clear_pending_events());
@@ -795,7 +797,7 @@ pub struct ChannelMonitor<ChanSigner: ChannelKeys> {
        payment_preimages: HashMap<PaymentHash, PaymentPreimage>,
 
        pending_monitor_events: Vec<MonitorEvent>,
-       pending_events: Vec<events::Event>,
+       pending_events: Vec<Event>,
 
        // Used to track onchain events, i.e transactions parts of channels confirmed on chain, on which
        // we have to take actions once they reach enough confs. Key is a block height timer, i.e we enforce
@@ -946,7 +948,7 @@ impl<ChanSigner: ChannelKeys + Writeable> ChannelMonitor<ChanSigner> {
        /// the "reorg path" (ie disconnecting blocks until you find a common ancestor from both the
        /// returned block hash and the current chain and then reconnecting blocks to get to the
        /// best chain) upon deserializing the object!
-       pub fn write_for_disk<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+       pub fn write_for_disk<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
                //TODO: We still write out all the serialization here manually instead of using the fancy
                //serialization framework we have, we should migrate things over to it.
                writer.write_all(&[SERIALIZATION_VERSION; 1])?;
@@ -1452,7 +1454,7 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
        /// This is called by ManyChannelMonitor::get_and_clear_pending_events() and is equivalent to
        /// EventsProvider::get_and_clear_pending_events() except that it requires &mut self as we do
        /// no internal locking in ChannelMonitors.
-       pub fn get_and_clear_pending_events(&mut self) -> Vec<events::Event> {
+       pub fn get_and_clear_pending_events(&mut self) -> Vec<Event> {
                let mut ret = Vec::new();
                mem::swap(&mut ret, &mut self.pending_events);
                ret
@@ -1957,7 +1959,7 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
                                        },
                                        OnchainEvent::MaturingOutput { descriptor } => {
                                                log_trace!(logger, "Descriptor {} has got enough confirmations to be passed upstream", log_spendable!(descriptor));
-                                               self.pending_events.push(events::Event::SpendableOutputs {
+                                               self.pending_events.push(Event::SpendableOutputs {
                                                        outputs: vec![descriptor]
                                                });
                                        }
@@ -2199,16 +2201,30 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
        fn is_paying_spendable_output<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) where L::Target: Logger {
                let mut spendable_output = None;
                for (i, outp) in tx.output.iter().enumerate() { // There is max one spendable output for any channel tx, including ones generated by us
+                       if i > ::std::u16::MAX as usize {
+                               // While it is possible that an output exists on chain which is greater than the
+                               // 2^16th output in a given transaction, this is only possible if the output is not
+                               // in a lightning transaction and was instead placed there by some third party who
+                               // wishes to give us money for no reason.
+                               // Namely, any lightning transactions which we pre-sign will never have anywhere
+                               // near 2^16 outputs both because such transactions must have ~2^16 outputs whose
+                               // scripts are no longer than one byte in length and because they are inherently
+                               // non-standard due to their size.
+                               // Thus, it is completely safe to ignore such outputs, and while it may result in
+                               // us ignoring non-lightning funds sent to us, that is only possible if someone fills
+                               // nearly a full block with garbage just to hit this case.
+                               continue;
+                       }
                        if outp.script_pubkey == self.destination_script {
                                spendable_output =  Some(SpendableOutputDescriptor::StaticOutput {
-                                       outpoint: BitcoinOutPoint { txid: tx.txid(), vout: i as u32 },
+                                       outpoint: OutPoint { txid: tx.txid(), index: i as u16 },
                                        output: outp.clone(),
                                });
                                break;
                        } else if let Some(ref broadcasted_local_revokable_script) = self.broadcasted_local_revokable_script {
                                if broadcasted_local_revokable_script.0 == outp.script_pubkey {
                                        spendable_output =  Some(SpendableOutputDescriptor::DynamicOutputP2WSH {
-                                               outpoint: BitcoinOutPoint { txid: tx.txid(), vout: i as u32 },
+                                               outpoint: OutPoint { txid: tx.txid(), index: i as u16 },
                                                per_commitment_point: broadcasted_local_revokable_script.1,
                                                to_self_delay: self.on_local_tx_csv,
                                                output: outp.clone(),
@@ -2219,14 +2235,14 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
                                }
                        } else if self.remote_payment_script == outp.script_pubkey {
                                spendable_output = Some(SpendableOutputDescriptor::StaticOutputRemotePayment {
-                                       outpoint: BitcoinOutPoint { txid: tx.txid(), vout: i as u32 },
+                                       outpoint: OutPoint { txid: tx.txid(), index: i as u16 },
                                        output: outp.clone(),
                                        key_derivation_params: self.keys.key_derivation_params(),
                                });
                                break;
                        } else if outp.script_pubkey == self.shutdown_script {
                                spendable_output = Some(SpendableOutputDescriptor::StaticOutput {
-                                       outpoint: BitcoinOutPoint { txid: tx.txid(), vout: i as u32 },
+                                       outpoint: OutPoint { txid: tx.txid(), index: i as u16 },
                                        output: outp.clone(),
                                });
                        }
@@ -2437,7 +2453,7 @@ impl<ChanSigner: ChannelKeys + Readable> Readable for (BlockHash, ChannelMonitor
                }
 
                let pending_events_len: u64 = Readable::read(reader)?;
-               let mut pending_events = Vec::with_capacity(cmp::min(pending_events_len as usize, MAX_ALLOC_SIZE / mem::size_of::<events::Event>()));
+               let mut pending_events = Vec::with_capacity(cmp::min(pending_events_len as usize, MAX_ALLOC_SIZE / mem::size_of::<Event>()));
                for _ in 0..pending_events_len {
                        if let Some(event) = MaybeReadable::read(reader)? {
                                pending_events.push(event);
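
The is_paying_spendable_output hunk above switches SpendableOutputDescriptor from bitcoin's OutPoint (u32 `vout`) to the crate's own OutPoint (u16 `index`). A stand-in sketch of the two shapes and of the `into_bitcoin_outpoint()` conversion the tests below rely on (illustrative definitions, not the real rust-lightning or rust-bitcoin ones):

// Illustrative stand-ins only: the real types are chain::transaction::OutPoint
// and bitcoin::blockdata::transaction::OutPoint.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Txid([u8; 32]);

struct LnOutPoint { txid: Txid, index: u16 }
struct BitcoinOutPoint { txid: Txid, vout: u32 }

impl LnOutPoint {
    // A u16 index always widens losslessly into bitcoin's u32 vout, which is
    // why is_paying_spendable_output can safely skip outputs beyond 2^16 - 1.
    fn into_bitcoin_outpoint(self) -> BitcoinOutPoint {
        BitcoinOutPoint { txid: self.txid, vout: self.index as u32 }
    }
}

fn main() {
    let spendable = LnOutPoint { txid: Txid([0; 32]), index: 3 };
    let previous_output = spendable.into_bitcoin_outpoint();
    assert_eq!(previous_output.vout, 3);
    assert_eq!(previous_output.txid, Txid([0; 32]));
}
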
lightning/src/ln/functional_test_utils.rs
index 9042120955925bb338115aab1d8210ad4165256a..b3d5e8c9e9da934a72989a88a287816cae79446a 100644 (file)
@@ -169,7 +169,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
                                        monitor: self.chan_monitor,
                                        tx_broadcaster: self.tx_broadcaster.clone(),
                                        logger: &test_utils::TestLogger::new(),
-                                       channel_monitors: &mut channel_monitors,
+                                       channel_monitors,
                                }).unwrap();
                        }
 
lightning/src/ln/functional_tests.rs
index 4b111f2af455b1a3842965cad2941786ec608383..ef5b0e0c77388aff3835254a4d63ae701a7d4afe 100644 (file)
@@ -1636,7 +1636,7 @@ fn test_fee_spike_violation_fails_htlc() {
        // Assemble the set of keys we can use for signatures for our commitment_signed message.
        let commitment_secret = SecretKey::from_slice(&remote_secret1).unwrap();
        let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &commitment_secret);
-       let commit_tx_keys = chan_utils::TxCreationKeys::new(&secp_ctx, &per_commitment_point, &remote_delayed_payment_basepoint,
+       let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, &remote_delayed_payment_basepoint,
                &remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint).unwrap();
 
        // Build the remote commitment transaction so we can sign it, and then later use the
@@ -3625,7 +3625,9 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8) {
        let logger = test_utils::TestLogger::new();
        let payment_event = {
                let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
-               let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels()), &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
+               let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(),
+                       &nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels().iter().collect::<Vec<_>>()),
+                       &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
                nodes[0].node.send_payment(&route, payment_hash_1, &None).unwrap();
                check_added_monitors!(nodes[0], 1);
 
@@ -3800,7 +3802,9 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8) {
 
        // Channel should still work fine...
        let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
-       let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels()), &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
+       let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(),
+               &nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels().iter().collect::<Vec<_>>()),
+               &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
        let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0;
        claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
 }
@@ -4314,7 +4318,7 @@ fn test_no_txn_manager_serialize_deserialize() {
                        monitor: nodes[0].chan_monitor,
                        tx_broadcaster: nodes[0].tx_broadcaster.clone(),
                        logger: &logger,
-                       channel_monitors: &mut channel_monitors,
+                       channel_monitors,
                }).unwrap()
        };
        nodes_0_deserialized = nodes_0_deserialized_tmp;
@@ -4422,7 +4426,7 @@ fn test_manager_serialize_deserialize_events() {
                        monitor: nodes[0].chan_monitor,
                        tx_broadcaster: nodes[0].tx_broadcaster.clone(),
                        logger: &logger,
-                       channel_monitors: &mut channel_monitors,
+                       channel_monitors,
                }).unwrap()
        };
        nodes_0_deserialized = nodes_0_deserialized_tmp;
@@ -4512,7 +4516,7 @@ fn test_simple_manager_serialize_deserialize() {
                        monitor: nodes[0].chan_monitor,
                        tx_broadcaster: nodes[0].tx_broadcaster.clone(),
                        logger: &logger,
-                       channel_monitors: &mut channel_monitors,
+                       channel_monitors,
                }).unwrap()
        };
        nodes_0_deserialized = nodes_0_deserialized_tmp;
@@ -4602,7 +4606,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
                monitor: nodes[0].chan_monitor,
                tx_broadcaster: nodes[0].tx_broadcaster.clone(),
                logger: &logger,
-               channel_monitors: &mut node_0_stale_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
+               channel_monitors: node_0_stale_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
        }) { } else {
                panic!("If the monitor(s) are stale, this indicates a bug and we should get an Err return");
        };
@@ -4616,7 +4620,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
                monitor: nodes[0].chan_monitor,
                tx_broadcaster: nodes[0].tx_broadcaster.clone(),
                logger: &logger,
-               channel_monitors: &mut node_0_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
+               channel_monitors: node_0_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
        }).unwrap();
        nodes_0_deserialized = nodes_0_deserialized_tmp;
        assert!(nodes_0_read.is_empty());
@@ -4668,7 +4672,7 @@ macro_rules! check_spendable_outputs {
                                                        match *outp {
                                                                SpendableOutputDescriptor::StaticOutputRemotePayment { ref outpoint, ref output, ref key_derivation_params } => {
                                                                        let input = TxIn {
-                                                                               previous_output: outpoint.clone(),
+                                                                               previous_output: outpoint.into_bitcoin_outpoint(),
                                                                                script_sig: Script::new(),
                                                                                sequence: 0,
                                                                                witness: Vec::new(),
@@ -4696,7 +4700,7 @@ macro_rules! check_spendable_outputs {
                                                                },
                                                                SpendableOutputDescriptor::DynamicOutputP2WSH { ref outpoint, ref per_commitment_point, ref to_self_delay, ref output, ref key_derivation_params, ref remote_revocation_pubkey } => {
                                                                        let input = TxIn {
-                                                                               previous_output: outpoint.clone(),
+                                                                               previous_output: outpoint.into_bitcoin_outpoint(),
                                                                                script_sig: Script::new(),
                                                                                sequence: *to_self_delay as u32,
                                                                                witness: Vec::new(),
@@ -4729,7 +4733,7 @@ macro_rules! check_spendable_outputs {
                                                                SpendableOutputDescriptor::StaticOutput { ref outpoint, ref output } => {
                                                                        let secp_ctx = Secp256k1::new();
                                                                        let input = TxIn {
-                                                                               previous_output: outpoint.clone(),
+                                                                               previous_output: outpoint.into_bitcoin_outpoint(),
                                                                                script_sig: Script::new(),
                                                                                sequence: 0,
                                                                                witness: Vec::new(),
@@ -7887,7 +7891,7 @@ fn test_data_loss_protect() {
                        logger: &logger,
                        tx_broadcaster: &tx_broadcaster,
                        default_config: UserConfig::default(),
-                       channel_monitors: &mut channel_monitors,
+                       channel_monitors,
                }).unwrap().1
        };
        nodes[0].node = &node_state_0;
lightning/src/ln/onchaintx.rs
index e21e8fb2ddf39090cf0a88c701b83ac5a70559ee..37c7f3964a21d0315f46ba76be9e28455220fb56 100644 (file)
@@ -583,7 +583,7 @@ impl<ChanSigner: ChannelKeys> OnchainTxHandler<ChanSigner> {
                        for (i, (outp, per_outp_material)) in cached_claim_datas.per_input_material.iter().enumerate() {
                                match per_outp_material {
                                        &InputMaterial::Revoked { ref per_commitment_point, ref remote_delayed_payment_base_key, ref remote_htlc_base_key, ref per_commitment_key, ref input_descriptor, ref amount, ref htlc, ref on_remote_tx_csv } => {
-                                               if let Ok(chan_keys) = TxCreationKeys::new(&self.secp_ctx, &per_commitment_point, remote_delayed_payment_base_key, remote_htlc_base_key, &self.key_storage.pubkeys().revocation_basepoint, &self.key_storage.pubkeys().htlc_basepoint) {
+                                               if let Ok(chan_keys) = TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, remote_delayed_payment_base_key, remote_htlc_base_key, &self.key_storage.pubkeys().revocation_basepoint, &self.key_storage.pubkeys().htlc_basepoint) {
 
                                                        let witness_script = if let Some(ref htlc) = *htlc {
                                                                chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, &chan_keys.a_htlc_key, &chan_keys.b_htlc_key, &chan_keys.revocation_key)
@@ -607,7 +607,7 @@ impl<ChanSigner: ChannelKeys> OnchainTxHandler<ChanSigner> {
                                                }
                                        },
                                        &InputMaterial::RemoteHTLC { ref per_commitment_point, ref remote_delayed_payment_base_key, ref remote_htlc_base_key, ref preimage, ref htlc } => {
-                                               if let Ok(chan_keys) = TxCreationKeys::new(&self.secp_ctx, &per_commitment_point, remote_delayed_payment_base_key, remote_htlc_base_key, &self.key_storage.pubkeys().revocation_basepoint, &self.key_storage.pubkeys().htlc_basepoint) {
+                                               if let Ok(chan_keys) = TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, remote_delayed_payment_base_key, remote_htlc_base_key, &self.key_storage.pubkeys().revocation_basepoint, &self.key_storage.pubkeys().htlc_basepoint) {
                                                        let witness_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, &chan_keys.a_htlc_key, &chan_keys.b_htlc_key, &chan_keys.revocation_key);
 
                                                        if !preimage.is_some() { bumped_tx.lock_time = htlc.cltv_expiry }; // Right now we don't aggregate time-locked transaction, if we do we should set lock_time before to avoid breaking hash computation
lightning/src/routing/network_graph.rs
index 89c19b84896bdb37c4644322871131461bf8354c..c46781d11b5888b8e0f55114a525db4ea0908a75 100644 (file)
@@ -20,19 +20,32 @@ use bitcoin::blockdata::opcodes;
 
 use chain::chaininterface::{ChainError, ChainWatchInterface};
 use ln::features::{ChannelFeatures, NodeFeatures};
-use ln::msgs::{DecodeError, ErrorAction, LightningError, RoutingMessageHandler, NetAddress, OptionalField, MAX_VALUE_MSAT};
+use ln::msgs::{DecodeError, ErrorAction, LightningError, RoutingMessageHandler, NetAddress, MAX_VALUE_MSAT};
+use ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, OptionalField};
 use ln::msgs;
 use util::ser::{Writeable, Readable, Writer};
 use util::logger::Logger;
 
 use std::{cmp, fmt};
-use std::sync::RwLock;
+use std::sync::{RwLock, RwLockReadGuard};
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::collections::BTreeMap;
 use std::collections::btree_map::Entry as BtreeEntry;
 use std::ops::Deref;
 use bitcoin::hashes::hex::ToHex;
 
+/// Represents the network as nodes and channels between them
+#[derive(PartialEq)]
+pub struct NetworkGraph {
+       channels: BTreeMap<u64, ChannelInfo>,
+       nodes: BTreeMap<PublicKey, NodeInfo>,
+}
+
+/// A simple newtype for RwLockReadGuard<'a, NetworkGraph>.
+/// This exists only to make accessing a RwLock<NetworkGraph> possible from
+/// the C bindings, as in Rust code this can be done directly.
+pub struct LockedNetworkGraph<'a>(pub RwLockReadGuard<'a, NetworkGraph>);
+
 /// Receives and validates network updates from peers,
 /// stores authentic and relevant data as a network graph.
 /// This network graph is then used for routing payments.
@@ -77,6 +90,21 @@ impl<C: Deref, L: Deref> NetGraphMsgHandler<C, L> where C::Target: ChainWatchInt
                        logger,
                }
        }
+
+       /// Take a read lock on the network_graph and return it in the C-bindings
+       /// newtype helper. This is likely only useful when called via the C
+       /// bindings as you can call `self.network_graph.read().unwrap()` in Rust
+       /// yourself.
+       pub fn read_locked_graph<'a>(&'a self) -> LockedNetworkGraph<'a> {
+               LockedNetworkGraph(self.network_graph.read().unwrap())
+       }
+}
+
+impl<'a> LockedNetworkGraph<'a> {
+       /// Get a reference to the NetworkGraph which this read-lock contains.
+       pub fn graph(&self) -> &NetworkGraph {
+               &*self.0
+       }
 }
 
 
@@ -147,7 +175,7 @@ impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for N
                self.network_graph.write().unwrap().update_channel(msg, Some(&self.secp_ctx))
        }
 
-       fn get_next_channel_announcements(&self, starting_point: u64, batch_amount: u8) -> Vec<(msgs::ChannelAnnouncement, Option<msgs::ChannelUpdate>, Option<msgs::ChannelUpdate>)> {
+       fn get_next_channel_announcements(&self, starting_point: u64, batch_amount: u8) -> Vec<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> {
                let network_graph = self.network_graph.read().unwrap();
                let mut result = Vec::with_capacity(batch_amount as usize);
                let mut iter = network_graph.get_channels().range(starting_point..);
@@ -175,7 +203,7 @@ impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for N
                result
        }
 
-       fn get_next_node_announcements(&self, starting_point: Option<&PublicKey>, batch_amount: u8) -> Vec<msgs::NodeAnnouncement> {
+       fn get_next_node_announcements(&self, starting_point: Option<&PublicKey>, batch_amount: u8) -> Vec<NodeAnnouncement> {
                let network_graph = self.network_graph.read().unwrap();
                let mut result = Vec::with_capacity(batch_amount as usize);
                let mut iter = if let Some(pubkey) = starting_point {
@@ -443,13 +471,6 @@ impl Readable for NodeInfo {
        }
 }
 
-/// Represents the network as nodes and channels between them
-#[derive(PartialEq)]
-pub struct NetworkGraph {
-       channels: BTreeMap<u64, ChannelInfo>,
-       nodes: BTreeMap<PublicKey, NodeInfo>,
-}
-
 impl Writeable for NetworkGraph {
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
                (self.channels.len() as u64).write(writer)?;
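
The network_graph.rs hunks above introduce LockedNetworkGraph and read_locked_graph. A standalone sketch of the newtype-over-read-guard pattern (stand-in types, without the real generics or message handling), showing why Rust callers can skip the helper:

use std::sync::{RwLock, RwLockReadGuard};

// Stand-in for the real NetworkGraph (which holds channel/node BTreeMaps).
struct NetworkGraph { node_count: usize }

// Same idea as the hunk: wrap the read guard in a nameable type so the C
// bindings can expose "a held read lock" as a concrete object.
struct LockedNetworkGraph<'a>(RwLockReadGuard<'a, NetworkGraph>);

impl<'a> LockedNetworkGraph<'a> {
    fn graph(&self) -> &NetworkGraph { &*self.0 }
}

struct NetGraphMsgHandler { network_graph: RwLock<NetworkGraph> }

impl NetGraphMsgHandler {
    fn read_locked_graph(&self) -> LockedNetworkGraph<'_> {
        LockedNetworkGraph(self.network_graph.read().unwrap())
    }
}

fn main() {
    let handler = NetGraphMsgHandler { network_graph: RwLock::new(NetworkGraph { node_count: 0 }) };
    // Rust code can simply take the read lock itself...
    assert_eq!(handler.network_graph.read().unwrap().node_count, 0);
    // ...while C callers go through the newtype helper.
    let locked = handler.read_locked_graph();
    assert_eq!(locked.graph().node_count, 0);
}
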
lightning/src/routing/router.rs
index 7730fd8f2ff4bb11ee7c965363826030aae336b7..23232a0d0fb85d05423b61f09788dfef0e9dc0e5 100644 (file)
@@ -14,7 +14,7 @@
 
 use bitcoin::secp256k1::key::PublicKey;
 
-use ln::channelmanager;
+use ln::channelmanager::ChannelDetails;
 use ln::features::{ChannelFeatures, NodeFeatures};
 use ln::msgs::{DecodeError, ErrorAction, LightningError, MAX_VALUE_MSAT};
 use routing::network_graph::{NetworkGraph, RoutingFees};
@@ -169,8 +169,8 @@ struct DummyDirectionalChannelInfo {
 /// The fees on channels from us to next-hops are ignored (as they are assumed to all be
 /// equal), however the enabled/disabled bit on such channels as well as the htlc_minimum_msat
 /// *is* checked as they may change based on the receiving node.
-pub fn get_route<L: Deref>(our_node_id: &PublicKey, network: &NetworkGraph, target: &PublicKey, first_hops: Option<&[channelmanager::ChannelDetails]>,
-       last_hops: &[RouteHint], final_value_msat: u64, final_cltv: u32, logger: L) -> Result<Route, LightningError> where L::Target: Logger {
+pub fn get_route<L: Deref>(our_node_id: &PublicKey, network: &NetworkGraph, target: &PublicKey, first_hops: Option<&[&ChannelDetails]>,
+       last_hops: &[&RouteHint], final_value_msat: u64, final_cltv: u32, logger: L) -> Result<Route, LightningError> where L::Target: Logger {
        // TODO: Obviously *only* using total fee cost sucks. We should consider weighting by
        // uptime/success in using a node in the past.
        if *target == *our_node_id {
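
get_route now takes slices of references. A self-contained sketch (stand-in types, stub body) of the call-site adaptation used by the fuzzer and tests elsewhere in this commit:

// Stand-in types with only the fields needed for the sketch.
struct ChannelDetails { outbound_capacity_msat: u64 }
struct RouteHint { short_channel_id: u64 }

// Same parameter shapes as the new get_route signature (body is a stub).
fn get_route(first_hops: Option<&[&ChannelDetails]>, last_hops: &[&RouteHint]) -> usize {
    first_hops.map(|h| h.len()).unwrap_or(0) + last_hops.len()
}

fn main() {
    let usable_channels = vec![ChannelDetails { outbound_capacity_msat: 1_000_000 }];
    let hints = vec![RouteHint { short_channel_id: 42 }];
    // Owned Vecs are adapted by collecting references, as the updated callers do.
    let first_hop_refs = usable_channels.iter().collect::<Vec<_>>();
    let hint_refs = hints.iter().collect::<Vec<_>>();
    let hops = get_route(Some(first_hop_refs.as_slice()), &hint_refs);
    assert_eq!(hops, 2);
}
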
@@ -907,7 +907,7 @@ mod tests {
                        inbound_capacity_msat: 0,
                        is_live: true,
                }];
-               let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], Some(&our_chans),  &Vec::new(), 100, 42, Arc::clone(&logger)).unwrap();
+               let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], Some(&our_chans.iter().collect::<Vec<_>>()),  &Vec::new(), 100, 42, Arc::clone(&logger)).unwrap();
                assert_eq!(route.paths[0].len(), 2);
 
                assert_eq!(route.paths[0][0].pubkey, nodes[7]);
@@ -954,7 +954,7 @@ mod tests {
                        inbound_capacity_msat: 0,
                        is_live: true,
                }];
-               let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], Some(&our_chans), &Vec::new(), 100, 42, Arc::clone(&logger)).unwrap();
+               let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], Some(&our_chans.iter().collect::<Vec<_>>()), &Vec::new(), 100, 42, Arc::clone(&logger)).unwrap();
                assert_eq!(route.paths[0].len(), 2);
 
                assert_eq!(route.paths[0][0].pubkey, nodes[7]);
@@ -1018,7 +1018,7 @@ mod tests {
                        inbound_capacity_msat: 0,
                        is_live: true,
                }];
-               let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], Some(&our_chans), &Vec::new(), 100, 42, Arc::clone(&logger)).unwrap();
+               let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], Some(&our_chans.iter().collect::<Vec<_>>()), &Vec::new(), 100, 42, Arc::clone(&logger)).unwrap();
                assert_eq!(route.paths[0].len(), 2);
 
                assert_eq!(route.paths[0][0].pubkey, nodes[7]);
@@ -1071,7 +1071,7 @@ mod tests {
                let (_, our_id, _, nodes) = get_nodes(&secp_ctx);
 
                // Simple test across 2, 3, 5, and 4 via a last_hop channel
-               let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[6], None, &last_hops(&nodes), 100, 42, Arc::clone(&logger)).unwrap();
+               let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[6], None, &last_hops(&nodes).iter().collect::<Vec<_>>(), 100, 42, Arc::clone(&logger)).unwrap();
                assert_eq!(route.paths[0].len(), 5);
 
                assert_eq!(route.paths[0][0].pubkey, nodes[1]);
@@ -1130,7 +1130,7 @@ mod tests {
                        is_live: true,
                }];
                let mut last_hops = last_hops(&nodes);
-               let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[6], Some(&our_chans), &last_hops, 100, 42, Arc::clone(&logger)).unwrap();
+               let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[6], Some(&our_chans.iter().collect::<Vec<_>>()), &last_hops.iter().collect::<Vec<_>>(), 100, 42, Arc::clone(&logger)).unwrap();
                assert_eq!(route.paths[0].len(), 2);
 
                assert_eq!(route.paths[0][0].pubkey, nodes[3]);
@@ -1150,7 +1150,7 @@ mod tests {
                last_hops[0].fees.base_msat = 1000;
 
                // Revert to via 6 as the fee on 8 goes up
-               let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[6], None, &last_hops, 100, 42, Arc::clone(&logger)).unwrap();
+               let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[6], None, &last_hops.iter().collect::<Vec<_>>(), 100, 42, Arc::clone(&logger)).unwrap();
                assert_eq!(route.paths[0].len(), 4);
 
                assert_eq!(route.paths[0][0].pubkey, nodes[1]);
@@ -1184,7 +1184,7 @@ mod tests {
                assert_eq!(route.paths[0][3].channel_features.le_flags(), &Vec::<u8>::new()); // We can't learn any flags from invoices, sadly
 
                // ...but still use 8 for larger payments as 6 has a variable feerate
-               let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[6], None, &last_hops, 2000, 42, Arc::clone(&logger)).unwrap();
+               let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[6], None, &last_hops.iter().collect::<Vec<_>>(), 2000, 42, Arc::clone(&logger)).unwrap();
                assert_eq!(route.paths[0].len(), 5);
 
                assert_eq!(route.paths[0][0].pubkey, nodes[1]);
lightning/src/util/config.rs
index c3726365996ae5b5cff69448aeb33c766ce82b54..712b5937bab7ea2517f41cef155a7fe8d707c2b9 100644 (file)
 
 use ln::channelmanager::{BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT};
 
-/// Top-level config which holds ChannelHandshakeLimits and ChannelConfig.
-///
-/// Default::default() provides sane defaults for most configurations
-/// (but currently with 0 relay fees!)
-#[derive(Clone, Debug)]
-pub struct UserConfig {
-       /// Channel config that we propose to our counterparty.
-       pub own_channel_config: ChannelHandshakeConfig,
-       /// Limits applied to our counterparty's proposed channel config settings.
-       pub peer_channel_config_limits: ChannelHandshakeLimits,
-       /// Channel config which affects behavior during channel lifetime.
-       pub channel_options: ChannelConfig,
-}
-
-impl Default for UserConfig {
-       fn default() -> Self {
-               UserConfig {
-                       own_channel_config: ChannelHandshakeConfig::default(),
-                       peer_channel_config_limits: ChannelHandshakeLimits::default(),
-                       channel_options: ChannelConfig::default(),
-               }
-       }
-}
-
 /// Configuration we set when applicable.
 ///
 /// Default::default() provides sane defaults.
@@ -228,3 +204,27 @@ impl_writeable!(ChannelConfig, 8+1+1, {
        announced_channel,
        commit_upfront_shutdown_pubkey
 });
+
+/// Top-level config which holds ChannelHandshakeLimits and ChannelConfig.
+///
+/// Default::default() provides sane defaults for most configurations
+/// (but currently with 0 relay fees!)
+#[derive(Clone, Debug)]
+pub struct UserConfig {
+       /// Channel config that we propose to our counterparty.
+       pub own_channel_config: ChannelHandshakeConfig,
+       /// Limits applied to our counterparty's proposed channel config settings.
+       pub peer_channel_config_limits: ChannelHandshakeLimits,
+       /// Channel config which affects behavior during channel lifetime.
+       pub channel_options: ChannelConfig,
+}
+
+impl Default for UserConfig {
+       fn default() -> Self {
+               UserConfig {
+                       own_channel_config: ChannelHandshakeConfig::default(),
+                       peer_channel_config_limits: ChannelHandshakeLimits::default(),
+                       channel_options: ChannelConfig::default(),
+               }
+       }
+}
lightning/src/util/enforcing_trait_impls.rs
index 0d20f7ad4224258161d6bcd79d137baab0a66e02..2127cc1a4da6024116a8b92d84cd85b568e5802a 100644 (file)
@@ -46,12 +46,12 @@ impl EnforcingChannelKeys {
                                                                       keys: &TxCreationKeys) {
                let remote_points = self.inner.remote_pubkeys();
 
-               let keys_expected = TxCreationKeys::new(secp_ctx,
-                                                       &keys.per_commitment_point,
-                                                       &remote_points.delayed_payment_basepoint,
-                                                       &remote_points.htlc_basepoint,
-                                                       &self.inner.pubkeys().revocation_basepoint,
-                                                       &self.inner.pubkeys().htlc_basepoint).unwrap();
+               let keys_expected = TxCreationKeys::derive_new(secp_ctx,
+                                                              &keys.per_commitment_point,
+                                                              &remote_points.delayed_payment_basepoint,
+                                                              &remote_points.htlc_basepoint,
+                                                              &self.inner.pubkeys().revocation_basepoint,
+                                                              &self.inner.pubkeys().htlc_basepoint).unwrap();
                if keys != &keys_expected { panic!("derived different per-tx keys") }
        }
 }
lightning/src/util/macro_logger.rs
index 85484aceae31aa443c3ffdc380be2c1822f736e5..e565c19ccab84e50f447dde4a0bfc6ecb468104f 100644 (file)
@@ -136,13 +136,13 @@ impl<'a> std::fmt::Display for DebugSpendable<'a> {
        fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
                match self.0 {
                        &SpendableOutputDescriptor::StaticOutput { ref outpoint, .. } => {
-                               write!(f, "StaticOutput {}:{} marked for spending", outpoint.txid, outpoint.vout)?;
+                               write!(f, "StaticOutput {}:{} marked for spending", outpoint.txid, outpoint.index)?;
                        }
                        &SpendableOutputDescriptor::DynamicOutputP2WSH { ref outpoint, .. } => {
-                               write!(f, "DynamicOutputP2WSH {}:{} marked for spending", outpoint.txid, outpoint.vout)?;
+                               write!(f, "DynamicOutputP2WSH {}:{} marked for spending", outpoint.txid, outpoint.index)?;
                        }
                        &SpendableOutputDescriptor::StaticOutputRemotePayment { ref outpoint, .. } => {
-                               write!(f, "DynamicOutputP2WPKH {}:{} marked for spending", outpoint.txid, outpoint.vout)?;
+                               write!(f, "DynamicOutputP2WPKH {}:{} marked for spending", outpoint.txid, outpoint.index)?;
                        }
                }
                Ok(())