Merge pull request #1029 from TheBlueMatt/2021-07-log-channel-close
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Thu, 5 Aug 2021 21:05:43 +0000 (21:05 +0000)
committer GitHub <noreply@github.com>
Thu, 5 Aug 2021 21:05:43 +0000 (21:05 +0000)
Log when a channel is closed on startup due to stale ChannelManager
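
For orientation: the headline change emits a log line when, while deserializing a `ChannelManager`, a channel is found to be behind its `ChannelMonitor` and is therefore force-closed rather than resumed. A hedged sketch of the shape of that check (the helper name and exact message are illustrative, not the code in channelmanager.rs):

```rust
// Illustrative fragment only: the real check runs in ChannelManager's
// ReadableArgs::read, over each channel/monitor pair read from disk.
if channel.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
	// The serialized ChannelManager is stale relative to its monitor, so the
	// channel cannot be resumed; new in this merge, the closure is logged
	// instead of happening silently.
	log_error!(args.logger, "Closing channel {} due to a stale ChannelManager: its ChannelMonitor is newer",
		log_bytes!(channel.channel_id()));
	force_close_stale_channel(channel, monitor); // hypothetical helper
}
```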

35 files changed:
.github/workflows/build.yml
CHANGELOG.md
ci/check-compiles.sh
fuzz/src/chanmon_consistency.rs
fuzz/src/full_stack.rs
lightning-block-sync/src/http.rs
lightning-persister/src/util.rs
lightning/Cargo.toml
lightning/src/chain/channelmonitor.rs
lightning/src/chain/keysinterface.rs
lightning/src/chain/onchaintx.rs
lightning/src/chain/package.rs
lightning/src/lib.rs
lightning/src/ln/chan_utils.rs
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/features.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/msgs.rs
lightning/src/ln/onion_route_tests.rs
lightning/src/ln/onion_utils.rs
lightning/src/ln/peer_handler.rs
lightning/src/ln/reorg_tests.rs
lightning/src/ln/wire.rs
lightning/src/routing/network_graph.rs
lightning/src/routing/router.rs
lightning/src/util/chacha20.rs
lightning/src/util/enforcing_trait_impls.rs
lightning/src/util/events.rs
lightning/src/util/ser.rs
lightning/src/util/ser_macros.rs
lightning/src/util/test_utils.rs
lightning/src/util/transaction_utils.rs

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 5b886e19bff5d440cdabb8441b4e4a9d17077fdb..34337769a26677969bc8a3246c4cbb36f35648f7 100644 (file)
@@ -16,7 +16,7 @@ jobs:
                      1.41.0,
                      # 1.45.2 is MSRV for lightning-net-tokio, lightning-block-sync, and coverage generation
                      1.45.2,
-                     # 1.47.0 will be the MSRV for no_std builds using hashbrown once core2 is updated
+                     # 1.47.0 will be the MSRV for no-std builds using hashbrown once core2 is updated
                      1.47.0]
         include:
           - toolchain: stable
@@ -26,10 +26,18 @@ jobs:
             platform: macos-latest
             build-net-tokio: true
             build-no-std: true
+          - toolchain: beta
+            platform: macos-latest
+            build-net-tokio: true
+            build-no-std: true
           - toolchain: stable
             platform: windows-latest
             build-net-tokio: true
             build-no-std: true
+          - toolchain: beta
+            platform: windows-latest
+            build-net-tokio: true
+            build-no-std: true
           - toolchain: beta
             build-net-tokio: true
             build-no-std: true
@@ -87,19 +95,19 @@ jobs:
       - name: Test on Rust ${{ matrix.toolchain }} with net-tokio and full code-linking for coverage generation
         if: matrix.coverage
         run: RUSTFLAGS="-C link-dead-code" cargo test --verbose --color always
-      - name: Test on no_std bullds Rust ${{ matrix.toolchain }}
+      - name: Test on no-std bullds Rust ${{ matrix.toolchain }}
         if: "matrix.build-no-std && !matrix.coverage"
         run: |
           cd lightning
-          cargo test --verbose --color always --no-default-features --features no_std
-          # check if there is a conflict between no_std and the default std feature
-          cargo test --verbose --color always --features no_std
+          cargo test --verbose --color always --no-default-features --features no-std
+          # check if there is a conflict between no-std and the default std feature
+          cargo test --verbose --color always --features no-std
           cd ..
-      - name: Test on no_std bullds Rust ${{ matrix.toolchain }} and full code-linking for coverage generation
+      - name: Test on no-std builds Rust ${{ matrix.toolchain }} and full code-linking for coverage generation
         if: "matrix.build-no-std && matrix.coverage"
         run: |
           cd lightning
-          RUSTFLAGS="-C link-dead-code" cargo test --verbose --color always --no-default-features --features no_std
+          RUSTFLAGS="-C link-dead-code" cargo test --verbose --color always --no-default-features --features no-std
           cd ..
       - name: Test on Rust ${{ matrix.toolchain }}
         if: "! matrix.build-net-tokio"
@@ -262,4 +270,4 @@ jobs:
           rustup component add clippy
       - name: Run default clippy linting
         run: |
-          cargo clippy -- -Aclippy::erasing_op -Aclippy::never_loop -Aclippy::if_same_then_else
+          cargo clippy -- -Aclippy::erasing_op -Aclippy::never_loop -Aclippy::if_same_then_else -Dclippy::try_err
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 764e2ffdc5bb8dc61d43819a3e1aff7fbac1d181..dc2ea744cf522aea5dc393843ceb0beef25ced4f 100644 (file)
@@ -1,3 +1,17 @@
+# 0.0.100 - WIP
+
+## Serialization Compatibility
+ * HTLCs which were in the process of being claimed on-chain when a pre-0.0.100
+   `ChannelMonitor` was serialized may generate `PaymentForwarded` events with
+   spurious `fee_earned_msat` values. This only applies to payments which were
+   unresolved at the time of the upgrade.
+ * 0.0.100 clients with pending PaymentForwarded events at serialization-time
+   will generate serialized `ChannelManager` objects which 0.0.99 and earlier
+   clients cannot read. The likelihood of this can be reduced by ensuring you
+   process all pending events immediately before serialization (as is done by
+   the `lightning-background-processor` crate).
+
+
 # 0.0.99 - 2021-07-09
 
 ## API Updates
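
The second compatibility note above amounts to an ordering requirement: drain pending events before writing the `ChannelManager` if the output needs to stay readable by 0.0.99. A hedged sketch of that ordering (the closure-based `EventHandler` impl is assumed; `handle_event` and `persist` are hypothetical application hooks, and `lightning-background-processor` already enforces this for you):

```rust
// Sketch only: process pending events first, then serialize the manager.
channel_manager.process_pending_events(&|event: &Event| handle_event(event));
persist(&channel_manager); // e.g. Writeable::write into your storage
```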
diff --git a/ci/check-compiles.sh b/ci/check-compiles.sh
index 79c2d92b761ce409cc31fda64d3667bc29bf1cfe..2bc31007b6b7463bfe9df5bd46d4b76bcc36901f 100755 (executable)
@@ -6,4 +6,4 @@ cargo check
 cargo doc
 cargo doc --document-private-items
 cd fuzz && cargo check --features=stdin_fuzz
-cd ../lightning && cargo check --no-default-features --features=no_std
+cd ../lightning && cargo check --no-default-features --features=no-std
diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index b879ec9b2fcb82c61c78ae95cf4c02efdd15f7bd..70ddac5d204225365a5be66b9c0348d69d3642d5 100644 (file)
@@ -805,6 +805,7 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                                                },
                                                events::Event::PaymentSent { .. } => {},
                                                events::Event::PaymentFailed { .. } => {},
+                                               events::Event::PaymentForwarded { .. } if $node == 1 => {},
                                                events::Event::PendingHTLCsForwardable { .. } => {
                                                        nodes[$node].process_pending_htlc_forwards();
                                                },
diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs
index 9398dcb0b50ed59a1d972ca5d100bd61001d0cca..d1adf06e1ed19c9757a6c166c7de220e8fef9bc4 100644 (file)
@@ -596,12 +596,10 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
                                        //TODO: enhance by fetching random amounts from fuzz input?
                                        payments_received.push(payment_hash);
                                },
-                               Event::PaymentSent {..} => {},
-                               Event::PaymentFailed {..} => {},
                                Event::PendingHTLCsForwardable {..} => {
                                        should_forward = true;
                                },
-                               Event::SpendableOutputs {..} => {},
+                               _ => {},
                        }
                }
        }
diff --git a/lightning-block-sync/src/http.rs b/lightning-block-sync/src/http.rs
index 89054a23ffb5a936ad7814bcdcde8a248f20f18a..0721babfde3d1b626051ba1ccccb7240d1f5a5a7 100644 (file)
@@ -636,7 +636,10 @@ pub(crate) mod client_tests {
        #[test]
        fn connect_to_unresolvable_host() {
                match HttpClient::connect(("example.invalid", 80)) {
-                       Err(e) => assert_eq!(e.kind(), std::io::ErrorKind::Other),
+                       Err(e) => {
+                               assert!(e.to_string().contains("failed to lookup address information") ||
+                                       e.to_string().contains("No such host"), "{:?}", e);
+                       },
                        Ok(_) => panic!("Expected error"),
                }
        }
diff --git a/lightning-persister/src/util.rs b/lightning-persister/src/util.rs
index 1825980ad891fcb257cdaf956150a526ac5e326d..73b28985bfff6ae8b484e5f0d76c62e4091b3541 100644 (file)
@@ -135,7 +135,7 @@ mod tests {
                // Create the channel data file and make it a directory.
                fs::create_dir_all(get_full_filepath(path.clone(), filename.to_string())).unwrap();
                match write_to_file(path.clone(), filename.to_string(), &test_writeable) {
-                       Err(e) => assert_eq!(e.kind(), io::ErrorKind::Other),
+                       Err(e) => assert_eq!(e.raw_os_error(), Some(libc::EISDIR)),
                        _ => panic!("Unexpected Ok(())")
                }
                fs::remove_dir_all(path).unwrap();
@@ -178,7 +178,7 @@ mod tests {
                match write_to_file(path, filename, &test_writeable) {
                        Err(e) => {
                                #[cfg(not(target_os = "windows"))]
-                               assert_eq!(e.kind(), io::ErrorKind::Other);
+                               assert_eq!(e.raw_os_error(), Some(libc::EISDIR));
                                #[cfg(target_os = "windows")]
                                assert_eq!(e.kind(), io::ErrorKind::PermissionDenied);
                        }
diff --git a/lightning/Cargo.toml b/lightning/Cargo.toml
index 9a2861d6927da1969955c7b01f06877e5c95dafa..8580a53a3f88bf87bc698de0fed906a881dfa376 100644 (file)
@@ -26,25 +26,32 @@ max_level_debug = []
 unsafe_revoked_tx_signing = []
 unstable = []
 
-no_std = ["hashbrown", "bitcoin/no-std"]
+no-std = ["hashbrown", "bitcoin/no-std", "core2/alloc"]
 std = ["bitcoin/std"]
 
 default = ["std"]
 
 [dependencies]
-bitcoin = "0.27"
+bitcoin = { version = "0.27", default-features = false, features = ["secp-recovery"] }
+# TODO remove this once rust-bitcoin PR #637 is released
+secp256k1 = { version = "0.20.2", default-features = false, features = ["alloc"] }
 
 hashbrown = { version = "0.11", optional = true }
 hex = { version = "0.3", optional = true }
 regex = { version = "0.1.80", optional = true }
 
+core2 = { version = "0.3.0", optional = true, default-features = false }
+
 [dev-dependencies]
 hex = "0.3"
 regex = "0.1.80"
+# TODO remove this once rust-bitcoin PR #637 is released
+secp256k1 = { version = "0.20.2", default-features = false, features = ["alloc"] }
 
 [dev-dependencies.bitcoin]
 version = "0.27"
-features = ["bitcoinconsensus"]
+default-features = false
+features = ["bitcoinconsensus", "secp-recovery"]
 
 [package.metadata.docs.rs]
 features = ["allow_wallclock_use"] # When https://github.com/rust-lang/rust/issues/43781 complies with our MSVR, we can add nice banners in the docs for the methods behind this feature-gate.
diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index a8ec8ee97b73f062a28492c7f472e512d119b3f9..7904d9bdefa4935a68ab44e04722fe5d5a220dc2 100644 (file)
@@ -53,7 +53,7 @@ use util::events::Event;
 
 use prelude::*;
 use core::{cmp, mem};
-use std::io::Error;
+use io::{self, Error};
 use core::ops::Deref;
 use sync::Mutex;
 
@@ -88,7 +88,7 @@ pub struct ChannelMonitorUpdate {
 pub const CLOSED_CHANNEL_UPDATE_ID: u64 = core::u64::MAX;
 
 impl Writeable for ChannelMonitorUpdate {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                write_ver_prefix!(w, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
                self.update_id.write(w)?;
                (self.updates.len() as u64).write(w)?;
@@ -100,7 +100,7 @@ impl Writeable for ChannelMonitorUpdate {
        }
 }
 impl Readable for ChannelMonitorUpdate {
-       fn read<R: ::std::io::Read>(r: &mut R) -> Result<Self, DecodeError> {
+       fn read<R: io::Read>(r: &mut R) -> Result<Self, DecodeError> {
                let _ver = read_ver_prefix!(r, SERIALIZATION_VERSION);
                let update_id: u64 = Readable::read(r)?;
                let len: u64 = Readable::read(r)?;
@@ -199,10 +199,12 @@ pub enum MonitorEvent {
 pub struct HTLCUpdate {
        pub(crate) payment_hash: PaymentHash,
        pub(crate) payment_preimage: Option<PaymentPreimage>,
-       pub(crate) source: HTLCSource
+       pub(crate) source: HTLCSource,
+       pub(crate) onchain_value_satoshis: Option<u64>,
 }
 impl_writeable_tlv_based!(HTLCUpdate, {
        (0, payment_hash, required),
+       (1, onchain_value_satoshis, option),
        (2, source, required),
        (4, payment_preimage, option),
 });
@@ -293,7 +295,7 @@ struct CounterpartyCommitmentTransaction {
 }
 
 impl Writeable for CounterpartyCommitmentTransaction {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                w.write_all(&byte_utils::be64_to_array(self.per_htlc.len() as u64))?;
                for (ref txid, ref htlcs) in self.per_htlc.iter() {
                        w.write_all(&txid[..])?;
@@ -311,7 +313,7 @@ impl Writeable for CounterpartyCommitmentTransaction {
        }
 }
 impl Readable for CounterpartyCommitmentTransaction {
-       fn read<R: ::std::io::Read>(r: &mut R) -> Result<Self, DecodeError> {
+       fn read<R: io::Read>(r: &mut R) -> Result<Self, DecodeError> {
                let counterparty_commitment_transaction = {
                        let per_htlc_len: u64 = Readable::read(r)?;
                        let mut per_htlc = HashMap::with_capacity(cmp::min(per_htlc_len as usize, MAX_ALLOC_SIZE / 64));
@@ -385,6 +387,7 @@ enum OnchainEvent {
        HTLCUpdate {
                source: HTLCSource,
                payment_hash: PaymentHash,
+               onchain_value_satoshis: Option<u64>,
        },
        MaturingOutput {
                descriptor: SpendableOutputDescriptor,
@@ -400,6 +403,7 @@ impl_writeable_tlv_based!(OnchainEventEntry, {
 impl_writeable_tlv_based_enum!(OnchainEvent,
        (0, HTLCUpdate) => {
                (0, source, required),
+               (1, onchain_value_satoshis, option),
                (2, payment_hash, required),
        },
        (1, MaturingOutput) => {
@@ -1574,6 +1578,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                                                event: OnchainEvent::HTLCUpdate {
                                                                                        source: (**source).clone(),
                                                                                        payment_hash: htlc.payment_hash.clone(),
+                                                                                       onchain_value_satoshis: Some(htlc.amount_msat / 1000),
                                                                                },
                                                                        };
                                                                        log_info!(logger, "Failing HTLC with payment_hash {} from {} counterparty commitment tx due to broadcast of revoked counterparty commitment transaction, waiting for confirmation (at height {})", log_bytes!(htlc.payment_hash.0), $commitment_tx, entry.confirmation_threshold());
@@ -1641,6 +1646,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                                        event: OnchainEvent::HTLCUpdate {
                                                                                source: (**source).clone(),
                                                                                payment_hash: htlc.payment_hash.clone(),
+                                                                               onchain_value_satoshis: Some(htlc.amount_msat / 1000),
                                                                        },
                                                                });
                                                        }
@@ -1779,27 +1785,6 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                let mut claim_requests = Vec::new();
                let mut watch_outputs = Vec::new();
 
-               macro_rules! wait_threshold_conf {
-                       ($source: expr, $commitment_tx: expr, $payment_hash: expr) => {
-                               self.onchain_events_awaiting_threshold_conf.retain(|ref entry| {
-                                       if entry.height != height { return true; }
-                                       match entry.event {
-                                               OnchainEvent::HTLCUpdate { source: ref update_source, .. } => {
-                                                       *update_source != $source
-                                               },
-                                               _ => true,
-                                       }
-                               });
-                               let entry = OnchainEventEntry {
-                                       txid: commitment_txid,
-                                       height,
-                                       event: OnchainEvent::HTLCUpdate { source: $source, payment_hash: $payment_hash },
-                               };
-                               log_trace!(logger, "Failing HTLC with payment_hash {} from {} holder commitment tx due to broadcast of transaction, waiting confirmation (at height{})", log_bytes!($payment_hash.0), $commitment_tx, entry.confirmation_threshold());
-                               self.onchain_events_awaiting_threshold_conf.push(entry);
-                       }
-               }
-
                macro_rules! append_onchain_update {
                        ($updates: expr, $to_watch: expr) => {
                                claim_requests = $updates.0;
@@ -1828,11 +1813,30 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                }
 
                macro_rules! fail_dust_htlcs_after_threshold_conf {
-                       ($holder_tx: expr) => {
+                       ($holder_tx: expr, $commitment_tx: expr) => {
                                for &(ref htlc, _, ref source) in &$holder_tx.htlc_outputs {
                                        if htlc.transaction_output_index.is_none() {
                                                if let &Some(ref source) = source {
-                                                       wait_threshold_conf!(source.clone(), "lastest", htlc.payment_hash.clone());
+                                                       self.onchain_events_awaiting_threshold_conf.retain(|ref entry| {
+                                                               if entry.height != height { return true; }
+                                                               match entry.event {
+                                                                       OnchainEvent::HTLCUpdate { source: ref update_source, .. } => {
+                                                                               update_source != source
+                                                                       },
+                                                                       _ => true,
+                                                               }
+                                                       });
+                                                       let entry = OnchainEventEntry {
+                                                               txid: commitment_txid,
+                                                               height,
+                                                               event: OnchainEvent::HTLCUpdate {
+                                                                       source: source.clone(), payment_hash: htlc.payment_hash,
+                                                                       onchain_value_satoshis: Some(htlc.amount_msat / 1000)
+                                                               },
+                                                       };
+                                                       log_trace!(logger, "Failing HTLC with payment_hash {} from {} holder commitment tx due to broadcast of transaction, waiting confirmation (at height{})",
+                                                               log_bytes!(htlc.payment_hash.0), $commitment_tx, entry.confirmation_threshold());
+                                                       self.onchain_events_awaiting_threshold_conf.push(entry);
                                                }
                                        }
                                }
@@ -1840,9 +1844,9 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                }
 
                if is_holder_tx {
-                       fail_dust_htlcs_after_threshold_conf!(self.current_holder_commitment_tx);
+                       fail_dust_htlcs_after_threshold_conf!(self.current_holder_commitment_tx, "latest");
                        if let &Some(ref holder_tx) = &self.prev_holder_signed_commitment_tx {
-                               fail_dust_htlcs_after_threshold_conf!(holder_tx);
+                               fail_dust_htlcs_after_threshold_conf!(holder_tx, "previous");
                        }
                }
 
@@ -2090,7 +2094,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                // Produce actionable events from on-chain events having reached their threshold.
                for entry in onchain_events_reaching_threshold_conf.drain(..) {
                        match entry.event {
-                               OnchainEvent::HTLCUpdate { ref source, payment_hash } => {
+                               OnchainEvent::HTLCUpdate { ref source, payment_hash, onchain_value_satoshis } => {
                                        // Check for duplicate HTLC resolutions.
                                        #[cfg(debug_assertions)]
                                        {
@@ -2109,9 +2113,10 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 
                                        log_debug!(logger, "HTLC {} failure update has got enough confirmations to be passed upstream", log_bytes!(payment_hash.0));
                                        self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate {
-                                               payment_hash: payment_hash,
+                                               payment_hash,
                                                payment_preimage: None,
                                                source: source.clone(),
+                                               onchain_value_satoshis,
                                        }));
                                },
                                OnchainEvent::MaturingOutput { descriptor } => {
@@ -2328,7 +2333,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                        if pending_htlc.payment_hash == $htlc_output.payment_hash && pending_htlc.amount_msat == $htlc_output.amount_msat {
                                                                if let &Some(ref source) = pending_source {
                                                                        log_claim!("revoked counterparty commitment tx", false, pending_htlc, true);
-                                                                       payment_data = Some(((**source).clone(), $htlc_output.payment_hash));
+                                                                       payment_data = Some(((**source).clone(), $htlc_output.payment_hash, $htlc_output.amount_msat));
                                                                        break;
                                                                }
                                                        }
@@ -2348,7 +2353,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                                // transaction. This implies we either learned a preimage, the HTLC
                                                                // has timed out, or we screwed up. In any case, we should now
                                                                // resolve the source HTLC with the original sender.
-                                                               payment_data = Some(((*source).clone(), htlc_output.payment_hash));
+                                                               payment_data = Some(((*source).clone(), htlc_output.payment_hash, htlc_output.amount_msat));
                                                        } else if !$holder_tx {
                                                                        check_htlc_valid_counterparty!(self.current_counterparty_commitment_txid, htlc_output);
                                                                if payment_data.is_none() {
@@ -2381,7 +2386,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 
                        // Check that scan_commitment, above, decided there is some source worth relaying an
                        // HTLC resolution backwards to and figure out whether we learned a preimage from it.
-                       if let Some((source, payment_hash)) = payment_data {
+                       if let Some((source, payment_hash, amount_msat)) = payment_data {
                                let mut payment_preimage = PaymentPreimage([0; 32]);
                                if accepted_preimage_claim {
                                        if !self.pending_monitor_events.iter().any(
@@ -2390,7 +2395,8 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate {
                                                        source,
                                                        payment_preimage: Some(payment_preimage),
-                                                       payment_hash
+                                                       payment_hash,
+                                                       onchain_value_satoshis: Some(amount_msat / 1000),
                                                }));
                                        }
                                } else if offered_preimage_claim {
@@ -2402,7 +2408,8 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                                self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate {
                                                        source,
                                                        payment_preimage: Some(payment_preimage),
-                                                       payment_hash
+                                                       payment_hash,
+                                                       onchain_value_satoshis: Some(amount_msat / 1000),
                                                }));
                                        }
                                } else {
@@ -2418,7 +2425,10 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                        let entry = OnchainEventEntry {
                                                txid: tx.txid(),
                                                height,
-                                               event: OnchainEvent::HTLCUpdate { source: source, payment_hash: payment_hash },
+                                               event: OnchainEvent::HTLCUpdate {
+                                                       source, payment_hash,
+                                                       onchain_value_satoshis: Some(amount_msat / 1000),
+                                               },
                                        };
                                        log_info!(logger, "Failing HTLC with payment_hash {} timeout by a spend tx, waiting for confirmation (at height {})", log_bytes!(payment_hash.0), entry.confirmation_threshold());
                                        self.onchain_events_awaiting_threshold_conf.push(entry);
@@ -2451,7 +2461,8 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                        output: outp.clone(),
                                });
                                break;
-                       } else if let Some(ref broadcasted_holder_revokable_script) = self.broadcasted_holder_revokable_script {
+                       }
+                       if let Some(ref broadcasted_holder_revokable_script) = self.broadcasted_holder_revokable_script {
                                if broadcasted_holder_revokable_script.0 == outp.script_pubkey {
                                        spendable_output =  Some(SpendableOutputDescriptor::DelayedPaymentOutput(DelayedPaymentOutputDescriptor {
                                                outpoint: OutPoint { txid: tx.txid(), index: i as u16 },
@@ -2464,7 +2475,8 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                        }));
                                        break;
                                }
-                       } else if self.counterparty_payment_script == outp.script_pubkey {
+                       }
+                       if self.counterparty_payment_script == outp.script_pubkey {
                                spendable_output = Some(SpendableOutputDescriptor::StaticPaymentOutput(StaticPaymentOutputDescriptor {
                                        outpoint: OutPoint { txid: tx.txid(), index: i as u16 },
                                        output: outp.clone(),
@@ -2472,11 +2484,13 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
                                        channel_value_satoshis: self.channel_value_satoshis,
                                }));
                                break;
-                       } else if outp.script_pubkey == self.shutdown_script {
+                       }
+                       if outp.script_pubkey == self.shutdown_script {
                                spendable_output = Some(SpendableOutputDescriptor::StaticOutput {
                                        outpoint: OutPoint { txid: tx.txid(), index: i as u16 },
                                        output: outp.clone(),
                                });
+                               break;
                        }
                }
                if let Some(spendable_output) = spendable_output {
@@ -2581,7 +2595,7 @@ const MAX_ALLOC_SIZE: usize = 64*1024;
 
 impl<'a, Signer: Sign, K: KeysInterface<Signer = Signer>> ReadableArgs<&'a K>
                for (BlockHash, ChannelMonitor<Signer>) {
-       fn read<R: ::std::io::Read>(reader: &mut R, keys_manager: &'a K) -> Result<Self, DecodeError> {
+       fn read<R: io::Read>(reader: &mut R, keys_manager: &'a K) -> Result<Self, DecodeError> {
                macro_rules! unwrap_obj {
                        ($key: expr) => {
                                match $key {
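
The `onchain_value_satoshis` additions above use odd TLV type numbers (1), which under this crate's TLV convention are optional and simply decode as `None` from data written before the field existed — hence the serialization-compatibility note in the changelog. An illustrative (hypothetical) struct using the same macro pattern as `HTLCUpdate`:

```rust
// Hypothetical example mirroring the pattern above: the odd type-1 field reads
// back as `None` from monitors serialized before it was introduced.
struct ExampleUpdate {
	payment_hash: PaymentHash,
	onchain_value_satoshis: Option<u64>,
}
impl_writeable_tlv_based!(ExampleUpdate, {
	(0, payment_hash, required),
	(1, onchain_value_satoshis, option),
});
```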
diff --git a/lightning/src/chain/keysinterface.rs b/lightning/src/chain/keysinterface.rs
index d7ff2a633cf5bce9c28198abdd23229fbbf74744..7356dad8e40d64c353a80efa77b89e01c2fed7b8 100644 (file)
@@ -39,7 +39,7 @@ use ln::msgs::UnsignedChannelAnnouncement;
 
 use prelude::*;
 use core::sync::atomic::{AtomicUsize, Ordering};
-use std::io::Error;
+use io::{self, Error};
 use ln::msgs::{DecodeError, MAX_VALUE_MSAT};
 
 /// Information about a spendable output to a P2WSH script. See
@@ -699,7 +699,7 @@ impl Writeable for InMemorySigner {
 }
 
 impl Readable for InMemorySigner {
-       fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+       fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
                let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
 
                let funding_key = Readable::read(reader)?;
@@ -1039,7 +1039,7 @@ impl KeysInterface for KeysManager {
        }
 
        fn read_chan_signer(&self, reader: &[u8]) -> Result<Self::Signer, DecodeError> {
-               InMemorySigner::read(&mut std::io::Cursor::new(reader))
+               InMemorySigner::read(&mut io::Cursor::new(reader))
        }
 
        fn sign_invoice(&self, invoice_preimage: Vec<u8>) -> Result<RecoverableSignature, ()> {
diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs
index cd28314651057d3659ee64dc5f2141935595b96b..44bbfc2d60ee54e8282de82eabdd18da17a39fcf 100644 (file)
@@ -32,6 +32,7 @@ use util::logger::Logger;
 use util::ser::{Readable, ReadableArgs, Writer, Writeable, VecWriter};
 use util::byte_utils;
 
+use io;
 use prelude::*;
 use alloc::collections::BTreeMap;
 use core::cmp;
@@ -94,7 +95,7 @@ impl_writeable_tlv_based_enum!(OnchainEvent,
 ;);
 
 impl Readable for Option<Vec<Option<(usize, Signature)>>> {
-       fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+       fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
                match Readable::read(reader)? {
                        0u8 => Ok(None),
                        1u8 => {
@@ -115,7 +116,7 @@ impl Readable for Option<Vec<Option<(usize, Signature)>>> {
 }
 
 impl Writeable for Option<Vec<Option<(usize, Signature)>>> {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                match self {
                        &Some(ref vec) => {
                                1u8.write(writer)?;
@@ -191,7 +192,7 @@ const SERIALIZATION_VERSION: u8 = 1;
 const MIN_SERIALIZATION_VERSION: u8 = 1;
 
 impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
-       pub(crate) fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+       pub(crate) fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
 
                self.destination_script.write(writer)?;
@@ -242,7 +243,7 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
 }
 
 impl<'a, K: KeysInterface> ReadableArgs<&'a K> for OnchainTxHandler<K::Signer> {
-       fn read<R: ::std::io::Read>(reader: &mut R, keys_manager: &'a K) -> Result<Self, DecodeError> {
+       fn read<R: io::Read>(reader: &mut R, keys_manager: &'a K) -> Result<Self, DecodeError> {
                let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
 
                let destination_script = Readable::read(reader)?;
@@ -285,7 +286,7 @@ impl<'a, K: KeysInterface> ReadableArgs<&'a K> for OnchainTxHandler<K::Signer> {
                for _ in 0..locktimed_packages_len {
                        let locktime = Readable::read(reader)?;
                        let packages_len: u64 = Readable::read(reader)?;
-                       let mut packages = Vec::with_capacity(cmp::min(packages_len as usize, MAX_ALLOC_SIZE / std::mem::size_of::<PackageTemplate>()));
+                       let mut packages = Vec::with_capacity(cmp::min(packages_len as usize, MAX_ALLOC_SIZE / core::mem::size_of::<PackageTemplate>()));
                        for _ in 0..packages_len {
                                packages.push(Readable::read(reader)?);
                        }
diff --git a/lightning/src/chain/package.rs b/lightning/src/chain/package.rs
index b5c1ffdf9bc4c92557f5c4dde08e9f1754b54d3e..a86add4b9e56efbd1606da736011db882b1e399d 100644 (file)
@@ -31,6 +31,8 @@ use util::byte_utils;
 use util::logger::Logger;
 use util::ser::{Readable, Writer, Writeable};
 
+use io;
+use prelude::*;
 use core::cmp;
 use core::mem;
 use core::ops::Deref;
@@ -395,8 +397,8 @@ impl PackageSolvingData {
                        PackageSolvingData::RevokedOutput(_) => output_conf_height + 1,
                        PackageSolvingData::RevokedHTLCOutput(_) => output_conf_height + 1,
                        PackageSolvingData::CounterpartyOfferedHTLCOutput(_) => output_conf_height + 1,
-                       PackageSolvingData::CounterpartyReceivedHTLCOutput(ref outp) => std::cmp::max(outp.htlc.cltv_expiry, output_conf_height + 1),
-                       PackageSolvingData::HolderHTLCOutput(ref outp) => std::cmp::max(outp.cltv_expiry, output_conf_height + 1),
+                       PackageSolvingData::CounterpartyReceivedHTLCOutput(ref outp) => cmp::max(outp.htlc.cltv_expiry, output_conf_height + 1),
+                       PackageSolvingData::HolderHTLCOutput(ref outp) => cmp::max(outp.cltv_expiry, output_conf_height + 1),
                        PackageSolvingData::HolderFundingOutput(_) => output_conf_height + 1,
                };
                absolute_timelock
@@ -682,7 +684,7 @@ impl PackageTemplate {
 }
 
 impl Writeable for PackageTemplate {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                writer.write_all(&byte_utils::be64_to_array(self.inputs.len() as u64))?;
                for (ref outpoint, ref rev_outp) in self.inputs.iter() {
                        outpoint.write(writer)?;
@@ -699,7 +701,7 @@ impl Writeable for PackageTemplate {
 }
 
 impl Readable for PackageTemplate {
-       fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+       fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
                let inputs_count = <u64 as Readable>::read(reader)?;
                let mut inputs: Vec<(BitcoinOutPoint, PackageSolvingData)> = Vec::with_capacity(cmp::min(inputs_count as usize, MAX_ALLOC_SIZE / 128));
                for _ in 0..inputs_count {
diff --git a/lightning/src/lib.rs b/lightning/src/lib.rs
index 5c414b7b233caa0c86aec510b548f993214c7e0b..e6ecd1f3563c6a312a5e22ffe017a7ea011d7186 100644 (file)
 #![allow(bare_trait_objects)]
 #![allow(ellipsis_inclusive_range_patterns)]
 
+#![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
+
 #![cfg_attr(all(any(test, feature = "_test_utils"), feature = "unstable"), feature(test))]
 #[cfg(all(any(test, feature = "_test_utils"), feature = "unstable"))] extern crate test;
 
+#[cfg(not(any(feature = "std", feature = "no-std")))]
+compile_error!("at least one of the `std` or `no-std` features must be enabled");
+
 #[macro_use]
 extern crate alloc;
 extern crate bitcoin;
+#[cfg(any(test, feature = "std"))]
 extern crate core;
+
 #[cfg(any(test, feature = "_test_utils"))] extern crate hex;
 #[cfg(any(test, feature = "fuzztarget", feature = "_test_utils"))] extern crate regex;
 
+#[cfg(not(feature = "std"))] extern crate core2;
+
 #[macro_use]
 pub mod util;
 pub mod chain;
 pub mod ln;
 pub mod routing;
 
+#[cfg(feature = "std")]
+use std::io;
+#[cfg(not(feature = "std"))]
+use core2::io;
+
+#[cfg(not(feature = "std"))]
+mod io_extras {
+       use core2::io::{self, Read, Write};
+
+       /// A writer which will move data into the void.
+       pub struct Sink {
+               _priv: (),
+       }
+
+       /// Creates an instance of a writer which will successfully consume all data.
+       pub const fn sink() -> Sink {
+               Sink { _priv: () }
+       }
+
+       impl core2::io::Write for Sink {
+               #[inline]
+               fn write(&mut self, buf: &[u8]) -> core2::io::Result<usize> {
+                       Ok(buf.len())
+               }
+
+               #[inline]
+               fn flush(&mut self) -> core2::io::Result<()> {
+                       Ok(())
+               }
+       }
+
+       pub fn copy<R: ?Sized, W: ?Sized>(reader: &mut R, writer: &mut W) -> Result<u64, io::Error>
+               where
+               R: Read,
+               W: Write,
+       {
+               let mut count = 0;
+               let mut buf = [0u8; 64];
+
+               loop {
+                       match reader.read(&mut buf) {
+                               Ok(0) => break,
+                               Ok(n) => { writer.write_all(&buf[0..n])?; count += n as u64; },
+                               Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {},
+                               Err(e) => return Err(e.into()),
+                       };
+               }
+               Ok(count)
+       }
+
+       pub fn read_to_end<D: io::Read>(mut d: D) -> Result<alloc::vec::Vec<u8>, io::Error> {
+               let mut result = vec![];
+               let mut buf = [0u8; 64];
+               loop {
+                       match d.read(&mut buf) {
+                               Ok(0) => break,
+                               Ok(n) => result.extend_from_slice(&buf[0..n]),
+                               Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {},
+                               Err(e) => return Err(e.into()),
+                       };
+               }
+               Ok(result)
+       }
+}
+
+#[cfg(feature = "std")]
+mod io_extras {
+       pub fn read_to_end<D: ::std::io::Read>(mut d: D) -> Result<Vec<u8>, ::std::io::Error> {
+               let mut buf = Vec::new();
+               d.read_to_end(&mut buf)?;
+               Ok(buf)
+       }
+
+       pub use std::io::{copy, sink};
+}
+
 mod prelude {
        #[cfg(feature = "hashbrown")]
        extern crate hashbrown;
 
-       pub use alloc::{vec, vec::Vec, string::String, collections::VecDeque};
+       pub use alloc::{vec, vec::Vec, string::String, collections::VecDeque, boxed::Box};
        #[cfg(not(feature = "hashbrown"))]
        pub use std::collections::{HashMap, HashSet, hash_map};
        #[cfg(feature = "hashbrown")]
        pub use self::hashbrown::{HashMap, HashSet, hash_map};
+
+       pub use alloc::borrow::ToOwned;
+       pub use alloc::string::ToString;
 }
 
 #[cfg(feature = "std")]
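
A crate-internal usage sketch (not part of the patch) of the `io_extras` helpers added above; the same calls are intended to compile against both the std re-exports and the core2-backed no-std versions, via the crate-level `io` alias:

```rust
// Sketch only: exercising copy/sink/read_to_end through the `io` alias.
fn _io_extras_demo() -> Result<(), io::Error> {
	let data = [1u8, 2u8, 3u8];
	// Drain a reader into the void; `copy` reports how many bytes passed through.
	let copied = io_extras::copy(&mut io::Cursor::new(&data[..]), &mut io_extras::sink())?;
	debug_assert_eq!(copied, data.len() as u64);
	// Collect a reader's remaining bytes into a Vec.
	let bytes = io_extras::read_to_end(io::Cursor::new(&data[..]))?;
	debug_assert_eq!(&bytes[..], &data[..]);
	Ok(())
}
```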
diff --git a/lightning/src/ln/chan_utils.rs b/lightning/src/ln/chan_utils.rs
index 6e0e50858f8972ff90865eb3c23f5c2ce39f10fa..4690d298aedee2a7f16b18adf09ffee8ac202c4e 100644 (file)
@@ -31,6 +31,7 @@ use bitcoin::secp256k1::{Secp256k1, Signature, Message};
 use bitcoin::secp256k1::Error as SecpError;
 use bitcoin::secp256k1;
 
+use io;
 use prelude::*;
 use core::cmp;
 use ln::chan_utils;
@@ -167,7 +168,7 @@ impl CounterpartyCommitmentSecrets {
 }
 
 impl Writeable for CounterpartyCommitmentSecrets {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                for &(ref secret, ref idx) in self.old_secrets.iter() {
                        writer.write_all(secret)?;
                        writer.write_all(&byte_utils::be64_to_array(*idx))?;
@@ -177,7 +178,7 @@ impl Writeable for CounterpartyCommitmentSecrets {
        }
 }
 impl Readable for CounterpartyCommitmentSecrets {
-       fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+       fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
                let mut old_secrets = [([0; 32], 1 << 48); 49];
                for &mut (ref mut secret, ref mut idx) in old_secrets.iter_mut() {
                        *secret = Readable::read(reader)?;
diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index 90519f286b6546628df70781ab85abf3ba195272..720190ec85523237bbc9a0e44e581b09b05586a7 100644 (file)
@@ -40,6 +40,7 @@ use ln::functional_test_utils::*;
 
 use util::test_utils;
 
+use io;
 use prelude::*;
 use sync::{Arc, Mutex};
 
@@ -122,7 +123,7 @@ fn test_monitor_and_persister_update_fail() {
                let mut w = test_utils::TestVecWriter(Vec::new());
                monitor.write(&mut w).unwrap();
                let new_monitor = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
-                       &mut ::std::io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
+                       &mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
                assert!(new_monitor == *monitor);
                let chain_mon = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
                assert!(chain_mon.watch_channel(outpoint, new_monitor).is_ok());
@@ -1159,6 +1160,7 @@ fn test_monitor_update_fail_reestablish() {
        assert!(updates.update_fee.is_none());
        assert_eq!(updates.update_fulfill_htlcs.len(), 1);
        nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+       expect_payment_forwarded!(nodes[1], Some(1000), false);
        check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
@@ -2317,6 +2319,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
                assert_eq!(fulfill_msg, cs_updates.update_fulfill_htlcs[0]);
        }
        nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &fulfill_msg);
+       expect_payment_forwarded!(nodes[1], Some(1000), false);
        check_added_monitors!(nodes[1], 1);
 
        let mut bs_updates = None;
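
The `expect_payment_forwarded!(nodes[1], Some(1000), false)` calls added in this file check for the new `PaymentForwarded` event. The macro itself lives in functional_test_utils.rs and is not reproduced in the hunks shown here; roughly, it performs an assertion along these lines (a sketch, with the exact field set assumed):

```rust
// Rough sketch of the assertion behind expect_payment_forwarded!.
let events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
	Event::PaymentForwarded { fee_earned_msat, claim_from_onchain_tx } => {
		assert_eq!(fee_earned_msat, Some(1000)); // 1000 msat of forwarding fee earned
		assert!(!claim_from_onchain_tx);         // claimed off-chain, not via a broadcast
	},
	_ => panic!("Unexpected event"),
}
```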
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index e08b8af49a3a40f5992f8e09b77f99f8279135fd..f80377b7d4a9247fd7e30dea1ee4930eda270ea9 100644 (file)
@@ -41,6 +41,7 @@ use util::errors::APIError;
 use util::config::{UserConfig,ChannelConfig};
 use util::scid_utils::scid_from_parts;
 
+use io;
 use prelude::*;
 use core::{cmp,mem,fmt};
 use core::ops::Deref;
@@ -306,6 +307,7 @@ pub struct CounterpartyForwardingInfo {
 enum UpdateFulfillFetch {
        NewClaim {
                monitor_update: ChannelMonitorUpdate,
+               htlc_value_msat: u64,
                msg: Option<msgs::UpdateFulfillHTLC>,
        },
        DuplicateClaim {},
@@ -319,6 +321,8 @@ pub enum UpdateFulfillCommitFetch {
        NewClaim {
                /// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
                monitor_update: ChannelMonitorUpdate,
+               /// The value of the HTLC which was claimed, in msat.
+               htlc_value_msat: u64,
                /// The update_fulfill message and commitment_signed message (if the claim was not placed
                /// in the holding cell).
                msgs: Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)>,
@@ -336,6 +340,9 @@ pub enum UpdateFulfillCommitFetch {
 // Holder designates channel data owned for the benefice of the user client.
 // Counterparty designates channel data owned by the another channel participant entity.
 pub(super) struct Channel<Signer: Sign> {
+       #[cfg(any(test, feature = "_test_utils"))]
+       pub(crate) config: ChannelConfig,
+       #[cfg(not(any(test, feature = "_test_utils")))]
        config: ChannelConfig,
 
        user_id: u64,
@@ -1275,6 +1282,7 @@ impl<Signer: Sign> Channel<Signer> {
                // these, but for now we just have to treat them as normal.
 
                let mut pending_idx = core::usize::MAX;
+               let mut htlc_value_msat = 0;
                for (idx, htlc) in self.pending_inbound_htlcs.iter().enumerate() {
                        if htlc.htlc_id == htlc_id_arg {
                                assert_eq!(htlc.payment_hash, payment_hash_calc);
@@ -1294,6 +1302,7 @@ impl<Signer: Sign> Channel<Signer> {
                                        }
                                }
                                pending_idx = idx;
+                               htlc_value_msat = htlc.amount_msat;
                                break;
                        }
                }
@@ -1335,7 +1344,7 @@ impl<Signer: Sign> Channel<Signer> {
                                                        // TODO: We may actually be able to switch to a fulfill here, though its
                                                        // rare enough it may not be worth the complexity burden.
                                                        debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
-                                                       return UpdateFulfillFetch::NewClaim { monitor_update, msg: None };
+                                                       return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
                                                }
                                        },
                                        _ => {}
@@ -1347,7 +1356,7 @@ impl<Signer: Sign> Channel<Signer> {
                        });
                        #[cfg(any(test, feature = "fuzztarget"))]
                        self.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
-                       return UpdateFulfillFetch::NewClaim { monitor_update, msg: None };
+                       return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
                }
                #[cfg(any(test, feature = "fuzztarget"))]
                self.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
@@ -1357,7 +1366,7 @@ impl<Signer: Sign> Channel<Signer> {
                        if let InboundHTLCState::Committed = htlc.state {
                        } else {
                                debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
-                               return UpdateFulfillFetch::NewClaim { monitor_update, msg: None };
+                               return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
                        }
                        log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id));
                        htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
@@ -1365,6 +1374,7 @@ impl<Signer: Sign> Channel<Signer> {
 
                UpdateFulfillFetch::NewClaim {
                        monitor_update,
+                       htlc_value_msat,
                        msg: Some(msgs::UpdateFulfillHTLC {
                                channel_id: self.channel_id(),
                                htlc_id: htlc_id_arg,
@@ -1375,7 +1385,7 @@ impl<Signer: Sign> Channel<Signer> {
 
        pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> Result<UpdateFulfillCommitFetch, (ChannelError, ChannelMonitorUpdate)> where L::Target: Logger {
                match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
-                       UpdateFulfillFetch::NewClaim { mut monitor_update, msg: Some(update_fulfill_htlc) } => {
+                       UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg: Some(update_fulfill_htlc) } => {
                                let (commitment, mut additional_update) = match self.send_commitment_no_status_check(logger) {
                                        Err(e) => return Err((e, monitor_update)),
                                        Ok(res) => res
@@ -1384,9 +1394,10 @@ impl<Signer: Sign> Channel<Signer> {
                                // strictly increasing by one, so decrement it here.
                                self.latest_monitor_update_id = monitor_update.update_id;
                                monitor_update.updates.append(&mut additional_update.updates);
-                               Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, msgs: Some((update_fulfill_htlc, commitment)) })
+                               Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, msgs: Some((update_fulfill_htlc, commitment)) })
                        },
-                       UpdateFulfillFetch::NewClaim { monitor_update, msg: None } => Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, msgs: None }),
+                       UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None } =>
+                               Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, msgs: None }),
                        UpdateFulfillFetch::DuplicateClaim {} => Ok(UpdateFulfillCommitFetch::DuplicateClaim {}),
                }
        }
@@ -2163,7 +2174,7 @@ impl<Signer: Sign> Channel<Signer> {
 
        /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
        #[inline]
-       fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentHash>, fail_reason: Option<HTLCFailReason>) -> Result<&HTLCSource, ChannelError> {
+       fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentHash>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
                for htlc in self.pending_outbound_htlcs.iter_mut() {
                        if htlc.htlc_id == htlc_id {
                                match check_preimage {
@@ -2182,13 +2193,13 @@ impl<Signer: Sign> Channel<Signer> {
                                        OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
                                                return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
                                }
-                               return Ok(&htlc.source);
+                               return Ok(htlc);
                        }
                }
                Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
        }
 
-       pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<HTLCSource, ChannelError> {
+       pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
                if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
                        return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
                }
@@ -2197,7 +2208,7 @@ impl<Signer: Sign> Channel<Signer> {
                }
 
                let payment_hash = PaymentHash(Sha256::hash(&msg.payment_preimage.0[..]).into_inner());
-               self.mark_outbound_htlc_removed(msg.htlc_id, Some(payment_hash), None).map(|source| source.clone())
+               self.mark_outbound_htlc_removed(msg.htlc_id, Some(payment_hash), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
        }
 
        pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
@@ -2496,7 +2507,7 @@ impl<Signer: Sign> Channel<Signer> {
                                                // in it hitting the holding cell again and we cannot change the state of a
                                                // holding cell HTLC from fulfill to anything else.
                                                let (update_fulfill_msg_option, mut additional_monitor_update) =
-                                                       if let UpdateFulfillFetch::NewClaim { msg, monitor_update } = self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) {
+                                                       if let UpdateFulfillFetch::NewClaim { msg, monitor_update, .. } = self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) {
                                                                (msg, monitor_update)
                                                        } else { unreachable!() };
                                                update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap());
@@ -4512,7 +4523,7 @@ impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
 );
 
 impl Writeable for ChannelUpdateStatus {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                // We only care about writing out the current state as it was announced, ie only either
                // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
                // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
@@ -4527,7 +4538,7 @@ impl Writeable for ChannelUpdateStatus {
 }
 
 impl Readable for ChannelUpdateStatus {
-       fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+       fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
                Ok(match <u8 as Readable>::read(reader)? {
                        0 => ChannelUpdateStatus::Enabled,
                        1 => ChannelUpdateStatus::Disabled,
@@ -4537,7 +4548,7 @@ impl Readable for ChannelUpdateStatus {
 }
 
 impl<Signer: Sign> Writeable for Channel<Signer> {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
                // called.
 
@@ -4770,7 +4781,7 @@ impl<Signer: Sign> Writeable for Channel<Signer> {
 const MAX_ALLOC_SIZE: usize = 64*1024;
 impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel<Signer>
                where K::Target: KeysInterface<Signer = Signer> {
-       fn read<R : ::std::io::Read>(reader: &mut R, keys_source: &'a K) -> Result<Self, DecodeError> {
+       fn read<R : io::Read>(reader: &mut R, keys_source: &'a K) -> Result<Self, DecodeError> {
                let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
 
                let user_id = Readable::read(reader)?;
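
Throughout this change, `::std::io::{Read, Write, Error, Cursor}` references become `io::...`, imported via the new `use io;` lines, so the same code can build with or without the standard library. A minimal sketch of the kind of crate-level re-export this assumes (the `std` feature name and the core2 fallback are assumptions here, not shown in this diff):

    // lib.rs-style sketch: expose one `io` path for the whole crate to import from.
    #[cfg(feature = "std")]
    pub mod io {
        pub use std::io::*; // Read, Write, Error, ErrorKind, Cursor, ...
    }
    #[cfg(not(feature = "std"))]
    pub mod io {
        pub use core2::io::*; // assumed no-std replacement with the same trait surface
    }
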
index c5db4075626fb93972037d10e1e5f60d68a57404..24c0c688cf0843274a61954433098d31324810a5 100644 (file)
@@ -60,10 +60,11 @@ use util::chacha20::{ChaCha20, ChaChaReader};
 use util::logger::{Logger, Level};
 use util::errors::APIError;
 
+use io;
 use prelude::*;
 use core::{cmp, mem};
 use core::cell::RefCell;
-use std::io::{Cursor, Read};
+use io::{Cursor, Read};
 use sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard};
 use core::sync::atomic::{AtomicUsize, Ordering};
 use core::time::Duration;
@@ -207,6 +208,14 @@ pub(super) enum HTLCFailReason {
        }
 }
 
+/// Return value for claim_funds_from_hop
+enum ClaimFundsFromHop {
+       PrevHopForceClosed,
+       MonitorUpdateFail(PublicKey, MsgHandleErrInternal, Option<u64>),
+       Success(u64),
+       DuplicateClaim,
+}
+
 type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>);
 
 /// Error type returned across the channel_state mutex boundary. When an Err is generated for a
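
The `ClaimFundsFromHop` enum above replaces the old `Result<(), Option<(PublicKey, MsgHandleErrInternal)>>` return type of `claim_funds_from_hop` (see the signature change further down), so callers also learn the claimed HTLC's value. A small sketch mirroring how `claim_funds_internal` consumes the variants later in this diff; the helper itself is illustrative, not part of the change:

    // Sketch: recover "did we actually claim anything?" plus the claimed value in msat.
    fn summarize_claim(res: &ClaimFundsFromHop) -> (bool, Option<u64>) {
        let claimed_htlc = if let ClaimFundsFromHop::DuplicateClaim = res { false } else { true };
        let htlc_claim_value_msat = match res {
            ClaimFundsFromHop::MonitorUpdateFail(_, _, amt_opt) => *amt_opt,
            ClaimFundsFromHop::Success(amt) => Some(*amt),
            _ => None, // PrevHopForceClosed and DuplicateClaim carry no value
        };
        (claimed_htlc, htlc_claim_value_msat)
    }
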
@@ -2787,16 +2796,22 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                         HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data });
                                } else {
                                        match self.claim_funds_from_hop(channel_state.as_mut().unwrap(), htlc.prev_hop, payment_preimage) {
-                                               Err(Some(e)) => {
-                                                       if let msgs::ErrorAction::IgnoreError = e.1.err.action {
+                                               ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) => {
+                                                       if let msgs::ErrorAction::IgnoreError = err.err.action {
                                                                // We got a temporary failure updating monitor, but will claim the
                                                                // HTLC when the monitor updating is restored (or on chain).
-                                                               log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", e.1.err.err);
+                                                               log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
                                                                claimed_any_htlcs = true;
-                                                       } else { errs.push(e); }
+                                                       } else { errs.push((pk, err)); }
+                                               },
+                                               ClaimFundsFromHop::PrevHopForceClosed => unreachable!("We already checked for channel existence, we can't fail here!"),
+                                               ClaimFundsFromHop::DuplicateClaim => {
+                                                       // While we should never get here in most cases, if we do, it likely
+                                                       // indicates that the HTLC was timed out some time ago and is no longer
+                                                       // available to be claimed. Thus, it does not make sense to set
+                                                       // `claimed_any_htlcs`.
                                                },
-                                               Err(None) => unreachable!("We already checked for channel existence, we can't fail here!"),
-                                               Ok(()) => claimed_any_htlcs = true,
+                                               ClaimFundsFromHop::Success(_) => claimed_any_htlcs = true,
                                        }
                                }
                        }
@@ -2814,28 +2829,29 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                } else { false }
        }
 
-       fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> Result<(), Option<(PublicKey, MsgHandleErrInternal)>> {
+       fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop {
                //TODO: Delay the claimed_funds relaying just like we do outbound relay!
                let channel_state = &mut **channel_state_lock;
                let chan_id = match channel_state.short_to_id.get(&prev_hop.short_channel_id) {
                        Some(chan_id) => chan_id.clone(),
                        None => {
-                               return Err(None)
+                               return ClaimFundsFromHop::PrevHopForceClosed
                        }
                };
 
                if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) {
                        match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
                                Ok(msgs_monitor_option) => {
-                                       if let UpdateFulfillCommitFetch::NewClaim { msgs, monitor_update } = msgs_monitor_option {
+                                       if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option {
                                                if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
                                                        log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Debug },
                                                                "Failed to update channel monitor with preimage {:?}: {:?}",
                                                                payment_preimage, e);
-                                                       return Err(Some((
+                                                       return ClaimFundsFromHop::MonitorUpdateFail(
                                                                chan.get().get_counterparty_node_id(),
                                                                handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
-                                                       )));
+                                                               Some(htlc_value_msat)
+                                                       );
                                                }
                                                if let Some((msg, commitment_signed)) = msgs {
                                                        log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
@@ -2852,8 +2868,10 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                }
                                                        });
                                                }
+                                               return ClaimFundsFromHop::Success(htlc_value_msat);
+                                       } else {
+                                               return ClaimFundsFromHop::DuplicateClaim;
                                        }
-                                       return Ok(())
                                },
                                Err((e, monitor_update)) => {
                                        if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
@@ -2866,13 +2884,13 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        if drop {
                                                chan.remove_entry();
                                        }
-                                       return Err(Some((counterparty_node_id, res)));
+                                       return ClaimFundsFromHop::MonitorUpdateFail(counterparty_node_id, res, None);
                                },
                        }
                } else { unreachable!(); }
        }
 
-       fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage) {
+       fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool) {
                match source {
                        HTLCSource::OutboundRoute { session_priv, .. } => {
                                mem::drop(channel_state_lock);
@@ -2891,29 +2909,51 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        },
                        HTLCSource::PreviousHopData(hop_data) => {
                                let prev_outpoint = hop_data.outpoint;
-                               if let Err((counterparty_node_id, err)) = match self.claim_funds_from_hop(&mut channel_state_lock, hop_data, payment_preimage) {
-                                       Ok(()) => Ok(()),
-                                       Err(None) => {
-                                               let preimage_update = ChannelMonitorUpdate {
-                                                       update_id: CLOSED_CHANNEL_UPDATE_ID,
-                                                       updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
-                                                               payment_preimage: payment_preimage.clone(),
-                                                       }],
-                                               };
-                                               // We update the ChannelMonitor on the backward link, after
-                                               // receiving an offchain preimage event from the forward link (the
-                                               // event being update_fulfill_htlc).
-                                               if let Err(e) = self.chain_monitor.update_channel(prev_outpoint, preimage_update) {
-                                                       log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
-                                                                  payment_preimage, e);
-                                               }
-                                               Ok(())
-                                       },
-                                       Err(Some(res)) => Err(res),
-                               } {
-                                       mem::drop(channel_state_lock);
-                                       let res: Result<(), _> = Err(err);
-                                       let _ = handle_error!(self, res, counterparty_node_id);
+                               let res = self.claim_funds_from_hop(&mut channel_state_lock, hop_data, payment_preimage);
+                               let claimed_htlc = if let ClaimFundsFromHop::DuplicateClaim = res { false } else { true };
+                               let htlc_claim_value_msat = match res {
+                                       ClaimFundsFromHop::MonitorUpdateFail(_, _, amt_opt) => amt_opt,
+                                       ClaimFundsFromHop::Success(amt) => Some(amt),
+                                       _ => None,
+                               };
+                               if let ClaimFundsFromHop::PrevHopForceClosed = res {
+                                       let preimage_update = ChannelMonitorUpdate {
+                                               update_id: CLOSED_CHANNEL_UPDATE_ID,
+                                               updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
+                                                       payment_preimage: payment_preimage.clone(),
+                                               }],
+                                       };
+                                       // We update the ChannelMonitor on the backward link, after
+                                       // receiving an offchain preimage event from the forward link (the
+                                       // event being update_fulfill_htlc).
+                                       if let Err(e) = self.chain_monitor.update_channel(prev_outpoint, preimage_update) {
+                                               log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+                                                                                        payment_preimage, e);
+                                       }
+                                       // Note that we do *not* set `claimed_htlc` to false here. In fact, this
+                                       // totally could be a duplicate claim, but we have no way of knowing
+                                       // without interrogating the `ChannelMonitor` we've provided the above
+                                       // update to. Instead, we simply document in `PaymentForwarded` that this
+                                       // can happen.
+                               }
+                               mem::drop(channel_state_lock);
+                               if let ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) = res {
+                                       let result: Result<(), _> = Err(err);
+                                       let _ = handle_error!(self, result, pk);
+                               }
+
+                               if claimed_htlc {
+                                       if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
+                                               let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat {
+                                                       Some(claimed_htlc_value - forwarded_htlc_value)
+                                               } else { None };
+
+                                               let mut pending_events = self.pending_events.lock().unwrap();
+                                               pending_events.push(events::Event::PaymentForwarded {
+                                                       fee_earned_msat,
+                                                       claim_from_onchain_tx: from_onchain,
+                                               });
+                                       }
                                }
                        },
                }
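
The `fee_earned_msat` reported in `PaymentForwarded` above is the value of the HTLC claimed on the previous hop minus the value forwarded on the next hop, and it is only computed when both amounts are known (for on-chain claims the forwarded value is derived from whole satoshis, so sub-satoshi precision is already gone). A standalone sketch of that arithmetic with made-up amounts:

    // Sketch of the fee computation: inbound (claimed) amount minus outbound (forwarded)
    // amount, or None when either side is unknown.
    fn fee_earned_msat(claimed_msat: Option<u64>, forwarded_msat: Option<u64>) -> Option<u64> {
        match (claimed_msat, forwarded_msat) {
            (Some(claimed), Some(forwarded)) => Some(claimed - forwarded),
            _ => None,
        }
    }

    fn main() {
        // e.g. claim 1_001_000 msat upstream after forwarding 1_000_000 msat downstream
        assert_eq!(fee_earned_msat(Some(1_001_000), Some(1_000_000)), Some(1_000));
    }
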
@@ -3309,7 +3349,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
        fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
                let mut channel_lock = self.channel_state.lock().unwrap();
-               let htlc_source = {
+               let (htlc_source, forwarded_htlc_value) = {
                        let channel_state = &mut *channel_lock;
                        match channel_state.by_id.entry(msg.channel_id) {
                                hash_map::Entry::Occupied(mut chan) => {
@@ -3321,7 +3361,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                        }
                };
-               self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone());
+               self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false);
                Ok(())
        }
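
`internal_update_fulfill_htlc` now also passes the forwarded HTLC's value into `claim_funds_internal`, which is what ultimately surfaces the new `PaymentForwarded` event to the user. A hedged sketch of application-side handling; the import path follows the crate's `util::events` module and everything around the match arm is assumed plumbing:

    use lightning::util::events::Event;

    // Sketch: react to the new event in whatever event loop drives the ChannelManager.
    fn handle_event(event: &Event) {
        match event {
            Event::PaymentForwarded { fee_earned_msat, claim_from_onchain_tx } => {
                // fee_earned_msat is None when the claimed or forwarded amount wasn't known,
                // e.g. when the previous hop's channel had already been force-closed.
                println!("Forwarded an HTLC: earned {:?} msat (claimed on-chain: {})",
                         fee_earned_msat, claim_from_onchain_tx);
            },
            _ => {}, // other events elided
        }
    }
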
 
@@ -3688,14 +3728,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// Process pending events from the `chain::Watch`, returning whether any events were processed.
        fn process_pending_monitor_events(&self) -> bool {
                let mut failed_channels = Vec::new();
-               let pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
+               let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
                let has_pending_monitor_events = !pending_monitor_events.is_empty();
-               for monitor_event in pending_monitor_events {
+               for monitor_event in pending_monitor_events.drain(..) {
                        match monitor_event {
                                MonitorEvent::HTLCEvent(htlc_update) => {
                                        if let Some(preimage) = htlc_update.payment_preimage {
                                                log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
-                                               self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
+                                               self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.onchain_value_satoshis.map(|v| v * 1000), true);
                                        } else {
                                                log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
                                                self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
@@ -3990,7 +4030,7 @@ where
                                result = NotifyOption::DoPersist;
                        }
 
-                       let mut pending_events = std::mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]);
+                       let mut pending_events = mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]);
                        if !pending_events.is_empty() {
                                result = NotifyOption::DoPersist;
                        }
@@ -4610,7 +4650,7 @@ impl_writeable_tlv_based!(HTLCPreviousHopData, {
 });
 
 impl Writeable for ClaimableHTLC {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                let payment_data = match &self.onion_payload {
                        OnionPayload::Invoice(data) => Some(data.clone()),
                        _ => None,
@@ -4714,7 +4754,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable f
         F::Target: FeeEstimator,
         L::Target: Logger,
 {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                let _consistency_lock = self.total_consistency_lock.write().unwrap();
 
                write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
@@ -4910,7 +4950,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
         F::Target: FeeEstimator,
         L::Target: Logger,
 {
-       fn read<R: ::std::io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
+       fn read<R: io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
                let (blockhash, chan_manager) = <(BlockHash, ChannelManager<Signer, M, T, K, F, L>)>::read(reader, args)?;
                Ok((blockhash, Arc::new(chan_manager)))
        }
@@ -4924,7 +4964,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
         F::Target: FeeEstimator,
         L::Target: Logger,
 {
-       fn read<R: ::std::io::Read>(reader: &mut R, mut args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
+       fn read<R: io::Read>(reader: &mut R, mut args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
                let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
 
                let genesis_hash: BlockHash = Readable::read(reader)?;
index b90748aa87b7e5cd55bf0461244fd2c654feb7eb..e78fa3d50d2d05c5bfcafe1fd58eb1b654c167f7 100644 (file)
@@ -22,6 +22,7 @@
 //! [BOLT #9]: https://github.com/lightningnetwork/lightning-rfc/blob/master/09-features.md
 //! [messages]: crate::ln::msgs
 
+use io;
 use prelude::*;
 use core::{cmp, fmt};
 use core::marker::PhantomData;
@@ -383,7 +384,7 @@ pub type InvoiceFeatures = Features<sealed::InvoiceContext>;
 
 impl InitFeatures {
        /// Writes all features present up to, and including, 13.
-       pub(crate) fn write_up_to_13<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       pub(crate) fn write_up_to_13<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                let len = cmp::min(2, self.flags.len());
                w.size_hint(len + 2);
                (len as u16).write(w)?;
@@ -692,7 +693,7 @@ impl<T: sealed::ShutdownAnySegwit> Features<T> {
 }
 
 impl<T: sealed::Context> Writeable for Features<T> {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                w.size_hint(self.flags.len() + 2);
                (self.flags.len() as u16).write(w)?;
                for f in self.flags.iter().rev() { // Swap back to big-endian
@@ -703,7 +704,7 @@ impl<T: sealed::Context> Writeable for Features<T> {
 }
 
 impl<T: sealed::Context> Readable for Features<T> {
-       fn read<R: ::std::io::Read>(r: &mut R) -> Result<Self, DecodeError> {
+       fn read<R: io::Read>(r: &mut R) -> Result<Self, DecodeError> {
                let mut flags: Vec<u8> = Readable::read(r)?;
                flags.reverse(); // Swap to little-endian
                Ok(Self {
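
For reference, the `Writeable`/`Readable` impls above keep the existing wire format for feature bits: a two-byte length followed by the flag bytes swapped back to big-endian order. A small sketch of just that byte-level transformation, assuming the crate's usual big-endian integer encoding for the length prefix:

    // Sketch: serialize little-endian-internal feature flag bytes the way the impl above does.
    fn encode_features_flags(flags: &[u8]) -> Vec<u8> {
        let mut out = Vec::with_capacity(flags.len() + 2);
        out.extend_from_slice(&(flags.len() as u16).to_be_bytes());
        out.extend(flags.iter().rev()); // swap back to big-endian, as in Writeable above
        out
    }

    fn main() {
        // e.g. a single internal flag byte 0b0000_0010 serializes as [0x00, 0x01, 0x02]
        assert_eq!(encode_features_flags(&[0b0000_0010]), vec![0x00, 0x01, 0x02]);
    }
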
index 792a4826899e8e5ce497a9d88aee80a13fb19564..807bb20f7ece8f460facfa7dc8d955f4dff7c642 100644 (file)
@@ -39,6 +39,7 @@ use bitcoin::hash_types::BlockHash;
 
 use bitcoin::secp256k1::key::PublicKey;
 
+use io;
 use prelude::*;
 use core::cell::RefCell;
 use std::rc::Rc;
@@ -239,7 +240,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
                                let mut w = test_utils::TestVecWriter(Vec::new());
                                let network_graph_ser = self.net_graph_msg_handler.network_graph.read().unwrap();
                                network_graph_ser.write(&mut w).unwrap();
-                               let network_graph_deser = <NetworkGraph>::read(&mut ::std::io::Cursor::new(&w.0)).unwrap();
+                               let network_graph_deser = <NetworkGraph>::read(&mut io::Cursor::new(&w.0)).unwrap();
                                assert!(network_graph_deser == *self.net_graph_msg_handler.network_graph.read().unwrap());
                                let net_graph_msg_handler = NetGraphMsgHandler::from_net_graph(
                                        Some(self.chain_source), self.logger, network_graph_deser
@@ -277,7 +278,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
                                        let mut w = test_utils::TestVecWriter(Vec::new());
                                        old_monitor.write(&mut w).unwrap();
                                        let (_, deserialized_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
-                                               &mut ::std::io::Cursor::new(&w.0), self.keys_manager).unwrap();
+                                               &mut io::Cursor::new(&w.0), self.keys_manager).unwrap();
                                        deserialized_monitors.push(deserialized_monitor);
                                }
                        }
@@ -292,7 +293,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
 
                                let mut w = test_utils::TestVecWriter(Vec::new());
                                self.node.write(&mut w).unwrap();
-                               <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut ::std::io::Cursor::new(w.0), ChannelManagerReadArgs {
+                               <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut io::Cursor::new(w.0), ChannelManagerReadArgs {
                                        default_config: *self.node.get_current_default_configuration(),
                                        keys_manager: self.keys_manager,
                                        fee_estimator: &test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) },
@@ -1005,6 +1006,20 @@ macro_rules! expect_payment_sent {
        }
 }
 
+macro_rules! expect_payment_forwarded {
+       ($node: expr, $expected_fee: expr, $upstream_force_closed: expr) => {
+               let events = $node.node.get_and_clear_pending_events();
+               assert_eq!(events.len(), 1);
+               match events[0] {
+                       Event::PaymentForwarded { fee_earned_msat, claim_from_onchain_tx } => {
+                               assert_eq!(fee_earned_msat, $expected_fee);
+                               assert_eq!(claim_from_onchain_tx, $upstream_force_closed);
+                       },
+                       _ => panic!("Unexpected event"),
+               }
+       }
+}
+
 #[cfg(test)]
 macro_rules! expect_payment_failure_chan_update {
        ($node: expr, $scid: expr, $chan_closed: expr) => {
@@ -1169,6 +1184,8 @@ pub fn claim_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, exp
                        ($node: expr, $prev_node: expr, $new_msgs: expr) => {
                                {
                                        $node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0);
+                                       let fee = $node.node.channel_state.lock().unwrap().by_id.get(&next_msgs.as_ref().unwrap().0.channel_id).unwrap().config.forwarding_fee_base_msat;
+                                       expect_payment_forwarded!($node, Some(fee as u64), false);
                                        check_added_monitors!($node, 1);
                                        let new_next_msgs = if $new_msgs {
                                                let events = $node.node.get_and_clear_pending_msg_events();
index a13b06d8d993f34a80f61e5ffe11a8876bf8ba66..6e5395e65c651440946f22e92a8ada537c9300ee 100644 (file)
@@ -51,6 +51,7 @@ use bitcoin::secp256k1::key::{PublicKey,SecretKey};
 
 use regex;
 
+use io;
 use prelude::*;
 use alloc::collections::BTreeSet;
 use core::default::Default;
@@ -886,6 +887,7 @@ fn updates_shutdown_wait() {
        assert!(updates.update_fee.is_none());
        assert_eq!(updates.update_fulfill_htlcs.len(), 1);
        nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+       expect_payment_forwarded!(nodes[1], Some(1000), false);
        check_added_monitors!(nodes[1], 1);
        let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
@@ -1060,6 +1062,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
        assert!(updates.update_fee.is_none());
        assert_eq!(updates.update_fulfill_htlcs.len(), 1);
        nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+       expect_payment_forwarded!(nodes[1], Some(1000), false);
        check_added_monitors!(nodes[1], 1);
        let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
@@ -2832,6 +2835,12 @@ fn test_htlc_on_chain_success() {
                assert_eq!(added_monitors[0].0.txid, chan_2.3.txid());
                added_monitors.clear();
        }
+       let forwarded_events = nodes[1].node.get_and_clear_pending_events();
+       assert_eq!(forwarded_events.len(), 2);
+       if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[0] {
+       } else { panic!(); }
+       if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[1] {
+       } else { panic!(); }
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        {
                let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
@@ -4549,7 +4558,7 @@ fn test_dup_htlc_onchain_fails_on_reload() {
                let mut channel_monitors = HashMap::new();
                channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
                <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>
-                       ::read(&mut std::io::Cursor::new(&chan_manager_serialized.0[..]), ChannelManagerReadArgs {
+                       ::read(&mut io::Cursor::new(&chan_manager_serialized.0[..]), ChannelManagerReadArgs {
                                default_config: Default::default(),
                                keys_manager,
                                fee_estimator: node_cfgs[0].fee_estimator,
@@ -5329,19 +5338,15 @@ fn test_onchain_to_onchain_claim() {
        // So we broadcast C's commitment tx and HTLC-Success on B's chain, we should successfully be able to extract preimage and update downstream monitor
        let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
        connect_block(&nodes[1], &Block { header, txdata: vec![c_txn[1].clone(), c_txn[2].clone()]});
-       connect_blocks(&nodes[1], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
+       check_added_monitors!(nodes[1], 1);
+       expect_payment_forwarded!(nodes[1], Some(1000), true);
        {
                let mut b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
-               // ChannelMonitor: claim tx, ChannelManager: local commitment tx
-               assert_eq!(b_txn.len(), 2);
+               // ChannelMonitor: claim tx
+               assert_eq!(b_txn.len(), 1);
                check_spends!(b_txn[0], chan_2.3); // B local commitment tx, issued by ChannelManager
-               check_spends!(b_txn[1], c_txn[1]); // timeout tx on C remote commitment tx, issued by ChannelMonitor
-               assert_eq!(b_txn[1].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
-               assert!(b_txn[1].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
-               assert_ne!(b_txn[1].lock_time, 0); // Timeout tx
                b_txn.clear();
        }
-       check_added_monitors!(nodes[1], 1);
        let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(msg_events.len(), 3);
        check_added_monitors!(nodes[1], 1);
@@ -5367,19 +5372,14 @@ fn test_onchain_to_onchain_claim() {
        let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
        mine_transaction(&nodes[1], &commitment_tx[0]);
        let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
-       // ChannelMonitor: HTLC-Success tx + HTLC-Timeout RBF Bump, ChannelManager: local commitment tx + HTLC-Success tx
-       assert_eq!(b_txn.len(), 4);
-       check_spends!(b_txn[2], chan_1.3);
-       check_spends!(b_txn[3], b_txn[2]);
-       let (htlc_success_claim, htlc_timeout_bumped) =
-               if b_txn[0].input[0].previous_output.txid == commitment_tx[0].txid()
-                       { (&b_txn[0], &b_txn[1]) } else { (&b_txn[1], &b_txn[0]) };
-       check_spends!(htlc_success_claim, commitment_tx[0]);
-       assert_eq!(htlc_success_claim.input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
-       assert!(htlc_success_claim.output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
-       assert_eq!(htlc_success_claim.lock_time, 0); // Success tx
-       check_spends!(htlc_timeout_bumped, c_txn[1]); // timeout tx on C remote commitment tx, issued by ChannelMonitor
-       assert_ne!(htlc_timeout_bumped.lock_time, 0); // Success tx
+       // ChannelMonitor: HTLC-Success tx, ChannelManager: local commitment tx + HTLC-Success tx
+       assert_eq!(b_txn.len(), 3);
+       check_spends!(b_txn[1], chan_1.3);
+       check_spends!(b_txn[2], b_txn[1]);
+       check_spends!(b_txn[0], commitment_tx[0]);
+       assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
+       assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
+       assert_eq!(b_txn[0].lock_time, 0); // Success tx
 
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
@@ -5497,7 +5497,10 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
        expect_payment_failed!(nodes[0], duplicate_payment_hash, false);
 
        // Solve 2nd HTLC by broadcasting on B's chain HTLC-Success Tx from C
+       // Note that the fee paid is effectively double as the HTLC value (including the nodes[1] fee
+       // and nodes[2] fee) is rounded down and then claimed in full.
        mine_transaction(&nodes[1], &htlc_success_txn[0]);
+       expect_payment_forwarded!(nodes[1], Some(196*2), true);
        let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        assert!(updates.update_add_htlcs.is_empty());
        assert!(updates.update_fail_htlcs.is_empty());
@@ -7746,7 +7749,7 @@ fn test_data_loss_protect() {
 
        // Restore node A from previous state
        logger = test_utils::TestLogger::with_id(format!("node {}", 0));
-       let mut chain_monitor = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut ::std::io::Cursor::new(previous_chain_monitor_state.0), keys_manager).unwrap().1;
+       let mut chain_monitor = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut io::Cursor::new(previous_chain_monitor_state.0), keys_manager).unwrap().1;
        chain_source = test_utils::TestChainSource::new(Network::Testnet);
        tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))};
        fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
@@ -7755,7 +7758,7 @@ fn test_data_loss_protect() {
        node_state_0 = {
                let mut channel_monitors = HashMap::new();
                channel_monitors.insert(OutPoint { txid: chan.3.txid(), index: 0 }, &mut chain_monitor);
-               <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut ::std::io::Cursor::new(previous_node_state), ChannelManagerReadArgs {
+               <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut io::Cursor::new(previous_node_state), ChannelManagerReadArgs {
                        keys_manager: keys_manager,
                        fee_estimator: &fee_estimator,
                        chain_monitor: &monitor,
@@ -8850,7 +8853,7 @@ fn test_update_err_monitor_lockdown() {
                let mut w = test_utils::TestVecWriter(Vec::new());
                monitor.write(&mut w).unwrap();
                let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
-                               &mut ::std::io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
+                               &mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
                assert!(new_monitor == *monitor);
                let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
                assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
@@ -8912,7 +8915,7 @@ fn test_concurrent_monitor_claim() {
                let mut w = test_utils::TestVecWriter(Vec::new());
                monitor.write(&mut w).unwrap();
                let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
-                               &mut ::std::io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
+                               &mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
                assert!(new_monitor == *monitor);
                let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
                assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
@@ -8941,7 +8944,7 @@ fn test_concurrent_monitor_claim() {
                let mut w = test_utils::TestVecWriter(Vec::new());
                monitor.write(&mut w).unwrap();
                let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
-                               &mut ::std::io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
+                               &mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
                assert!(new_monitor == *monitor);
                let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
                assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
@@ -9156,6 +9159,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
        assert_eq!(carol_updates.update_fulfill_htlcs.len(), 1);
 
        nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &carol_updates.update_fulfill_htlcs[0]);
+       expect_payment_forwarded!(nodes[1], if go_onchain_before_fulfill || force_closing_node == 1 { None } else { Some(1000) }, false);
        // If Alice broadcasted but Bob doesn't know yet, here he prepares to tell her about the preimage.
        if !go_onchain_before_fulfill && broadcast_alice {
                let events = nodes[1].node.get_and_clear_pending_msg_events();
index 85672f4cd978dbd521b01b4fe5493b7a9f8d438a..0042cf51bf3c2c570c5f1f007567b317230fc54a 100644 (file)
@@ -35,7 +35,8 @@ use ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
 use prelude::*;
 use core::{cmp, fmt};
 use core::fmt::Debug;
-use std::io::Read;
+use io::{self, Read};
+use io_extras::read_to_end;
 
 use util::events::MessageSendEventsProvider;
 use util::logger;
@@ -64,7 +65,7 @@ pub enum DecodeError {
        BadLengthDescriptor,
        /// Error from std::io
        Io(/// (C-not exported) as ErrorKind doesn't have a reasonable mapping
-        ::std::io::ErrorKind),
+        io::ErrorKind),
        /// The message included zlib-compressed values, which we don't support.
        UnsupportedCompression,
 }
@@ -420,7 +421,7 @@ impl NetAddress {
 }
 
 impl Writeable for NetAddress {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                match self {
                        &NetAddress::IPv4 { ref addr, ref port } => {
                                1u8.write(writer)?;
@@ -979,9 +980,9 @@ impl fmt::Display for DecodeError {
        }
 }
 
-impl From<::std::io::Error> for DecodeError {
-       fn from(e: ::std::io::Error) -> Self {
-               if e.kind() == ::std::io::ErrorKind::UnexpectedEof {
+impl From<io::Error> for DecodeError {
+       fn from(e: io::Error) -> Self {
+               if e.kind() == io::ErrorKind::UnexpectedEof {
                        DecodeError::ShortRead
                } else {
                        DecodeError::Io(e.kind())
@@ -990,7 +991,7 @@ impl From<::std::io::Error> for DecodeError {
 }
 
 impl Writeable for OptionalField<Script> {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                match *self {
                        OptionalField::Present(ref script) => {
                                // Note that Writeable for script includes the 16-bit length tag for us
@@ -1017,7 +1018,7 @@ impl Readable for OptionalField<Script> {
 }
 
 impl Writeable for OptionalField<u64> {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                match *self {
                        OptionalField::Present(ref value) => {
                                value.write(w)?;
@@ -1065,7 +1066,7 @@ impl_writeable!(AnnouncementSignatures, 32+8+64*2, {
 });
 
 impl Writeable for ChannelReestablish {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                w.size_hint(if let OptionalField::Present(..) = self.data_loss_protect { 32+2*8+33+32 } else { 32+2*8 });
                self.channel_id.write(w)?;
                self.next_local_commitment_number.write(w)?;
@@ -1142,7 +1143,7 @@ impl_writeable!(FundingLocked, 32+33, {
 });
 
 impl Writeable for Init {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                // global_features gets the bottom 13 bits of our features, and local_features gets all of
                // our relevant feature bits. This keeps us compatible with old nodes.
                self.features.write_up_to_13(w)?;
@@ -1231,7 +1232,7 @@ impl_writeable_len_match!(OnionErrorPacket, {
 });
 
 impl Writeable for OnionPacket {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                w.size_hint(1 + 33 + 20*65 + 32);
                self.version.write(w)?;
                match self.public_key {
@@ -1269,7 +1270,7 @@ impl_writeable!(UpdateAddHTLC, 32+8+8+32+4+1366, {
 });
 
 impl Writeable for FinalOnionHopData {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                w.size_hint(32 + 8 - (self.total_msat.leading_zeros()/8) as usize);
                self.payment_secret.0.write(w)?;
                HighZeroBytesDroppedVarInt(self.total_msat).write(w)
@@ -1285,7 +1286,7 @@ impl Readable for FinalOnionHopData {
 }
 
 impl Writeable for OnionHopData {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                w.size_hint(33);
                // Note that this should never be reachable if Rust-Lightning generated the message, as we
                // check values are sane long before we get here, though its possible in the future
@@ -1386,7 +1387,7 @@ impl Readable for OnionHopData {
 }
 
 impl Writeable for Ping {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                w.size_hint(self.byteslen as usize + 4);
                self.ponglen.write(w)?;
                vec![0u8; self.byteslen as usize].write(w)?; // size-unchecked write
@@ -1408,7 +1409,7 @@ impl Readable for Ping {
 }
 
 impl Writeable for Pong {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                w.size_hint(self.byteslen as usize + 2);
                vec![0u8; self.byteslen as usize].write(w)?; // size-unchecked write
                Ok(())
@@ -1428,7 +1429,7 @@ impl Readable for Pong {
 }
 
 impl Writeable for UnsignedChannelAnnouncement {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                w.size_hint(2 + 32 + 8 + 4*33 + self.features.byte_count() + self.excess_data.len());
                self.features.write(w)?;
                self.chain_hash.write(w)?;
@@ -1452,11 +1453,7 @@ impl Readable for UnsignedChannelAnnouncement {
                        node_id_2: Readable::read(r)?,
                        bitcoin_key_1: Readable::read(r)?,
                        bitcoin_key_2: Readable::read(r)?,
-                       excess_data: {
-                               let mut excess_data = vec![];
-                               r.read_to_end(&mut excess_data)?;
-                               excess_data
-                       },
+                       excess_data: read_to_end(r)?,
                })
        }
 }
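
Calls like `r.read_to_end(&mut excess_data)` in these `Readable` impls are replaced by the `read_to_end` helper imported from `io_extras`, presumably so the same code works against the crate's no-std `Read` trait as well as `std::io::Read`. The helper's implementation isn't shown in this diff; a plausible sketch of the shape such a helper takes:

    use std::io::{self, Read};

    // Sketch (assumed shape, not the crate's io_extras code): read the remainder of the
    // stream into a freshly-allocated Vec<u8>.
    fn read_to_end<R: Read>(r: &mut R) -> Result<Vec<u8>, io::Error> {
        let mut buf = Vec::new();
        let mut chunk = [0u8; 1024];
        loop {
            let n = r.read(&mut chunk)?;
            if n == 0 { break; }
            buf.extend_from_slice(&chunk[..n]);
        }
        Ok(buf)
    }
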
@@ -1473,7 +1470,7 @@ impl_writeable_len_match!(ChannelAnnouncement, {
 });
 
 impl Writeable for UnsignedChannelUpdate {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                let mut size = 64 + self.excess_data.len();
                let mut message_flags: u8 = 0;
                if let OptionalField::Present(_) = self.htlc_maximum_msat {
@@ -1514,11 +1511,7 @@ impl Readable for UnsignedChannelUpdate {
                        fee_base_msat: Readable::read(r)?,
                        fee_proportional_millionths: Readable::read(r)?,
                        htlc_maximum_msat: if has_htlc_maximum_msat { Readable::read(r)? } else { OptionalField::Absent },
-                       excess_data: {
-                               let mut excess_data = vec![];
-                               r.read_to_end(&mut excess_data)?;
-                               excess_data
-                       },
+                       excess_data: read_to_end(r)?,
                })
        }
 }
@@ -1532,7 +1525,7 @@ impl_writeable_len_match!(ChannelUpdate, {
 });
 
 impl Writeable for ErrorMessage {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                w.size_hint(32 + 2 + self.data.len());
                self.channel_id.write(w)?;
                (self.data.len() as u16).write(w)?;
@@ -1547,9 +1540,8 @@ impl Readable for ErrorMessage {
                        channel_id: Readable::read(r)?,
                        data: {
                                let mut sz: usize = <u16 as Readable>::read(r)? as usize;
-                               let mut data = vec![];
-                               let data_len = r.read_to_end(&mut data)?;
-                               sz = cmp::min(data_len, sz);
+                               let data = read_to_end(r)?;
+                               sz = cmp::min(data.len(), sz);
                                match String::from_utf8(data[..sz as usize].to_vec()) {
                                        Ok(s) => s,
                                        Err(_) => return Err(DecodeError::InvalidValue),
@@ -1560,7 +1552,7 @@ impl Readable for ErrorMessage {
 }
 
 impl Writeable for UnsignedNodeAnnouncement {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                w.size_hint(76 + self.features.byte_count() + self.addresses.len()*38 + self.excess_address_data.len() + self.excess_data.len());
                self.features.write(w)?;
                self.timestamp.write(w)?;
@@ -1630,7 +1622,7 @@ impl Readable for UnsignedNodeAnnouncement {
                        }
                        Vec::new()
                };
-               r.read_to_end(&mut excess_data)?;
+               excess_data.extend(read_to_end(r)?.iter());
                Ok(UnsignedNodeAnnouncement {
                        features,
                        timestamp,
@@ -1687,7 +1679,7 @@ impl Readable for QueryShortChannelIds {
 }
 
 impl Writeable for QueryShortChannelIds {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                // Calculated from 1-byte encoding_type plus 8-bytes per short_channel_id
                let encoding_len: u16 = 1 + self.short_channel_ids.len() as u16 * 8;
 
@@ -1718,7 +1710,7 @@ impl Readable for ReplyShortChannelIdsEnd {
 }
 
 impl Writeable for ReplyShortChannelIdsEnd {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                w.size_hint(32 + 1);
                self.chain_hash.write(w)?;
                self.full_information.write(w)?;
@@ -1753,7 +1745,7 @@ impl Readable for QueryChannelRange {
 }
 
 impl Writeable for QueryChannelRange {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                w.size_hint(32 + 4 + 4);
                self.chain_hash.write(w)?;
                self.first_blocknum.write(w)?;
@@ -1803,7 +1795,7 @@ impl Readable for ReplyChannelRange {
 }
 
 impl Writeable for ReplyChannelRange {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                let encoding_len: u16 = 1 + self.short_channel_ids.len() as u16 * 8;
                w.size_hint(32 + 4 + 4 + 1 + 2 + encoding_len as usize);
                self.chain_hash.write(w)?;
@@ -1835,7 +1827,7 @@ impl Readable for GossipTimestampFilter {
 }
 
 impl Writeable for GossipTimestampFilter {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                w.size_hint(32 + 4 + 4);
                self.chain_hash.write(w)?;
                self.first_timestamp.write(w)?;
@@ -1863,8 +1855,8 @@ mod tests {
        use bitcoin::secp256k1::key::{PublicKey,SecretKey};
        use bitcoin::secp256k1::{Secp256k1, Message};
 
+       use io::Cursor;
        use prelude::*;
-       use std::io::Cursor;
 
        #[test]
        fn encoding_channel_reestablish_no_secret() {
index 5ddc5933f8d7d1810aa727dad5381372c53a71fc..8530d3d6610c181bfac5c1b1cab0f493090bb510 100644 (file)
@@ -33,9 +33,9 @@ use bitcoin::secp256k1;
 use bitcoin::secp256k1::Secp256k1;
 use bitcoin::secp256k1::key::SecretKey;
 
+use io;
 use prelude::*;
 use core::default::Default;
-use std::io;
 
 use ln::functional_test_utils::*;
 
index cd8f95681eb346ca4d10b9c32ec7105a6b5cc027..4886168dd16cc277adfd536fcedbca83ba03ac1a 100644 (file)
@@ -27,7 +27,7 @@ use bitcoin::secp256k1::ecdh::SharedSecret;
 use bitcoin::secp256k1;
 
 use prelude::*;
-use std::io::Cursor;
+use io::Cursor;
 use core::convert::TryInto;
 use core::ops::Deref;
 
@@ -480,6 +480,7 @@ pub(super) fn process_onion_failure<T: secp256k1::Signing, L: Deref>(secp_ctx: &
 
 #[cfg(test)]
 mod tests {
+       use io;
        use prelude::*;
        use ln::PaymentHash;
        use ln::features::{ChannelFeatures, NodeFeatures};
@@ -648,7 +649,7 @@ mod tests {
                }
        }
        impl Writeable for RawOnionHopData {
-               fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+               fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                        writer.write_all(&self.data[..])
                }
        }
index ec7500190d7ec3b062636fca716e71805ffddb05..be12a32e99e5737ffa1a96236d269a917add4a14 100644 (file)
@@ -31,13 +31,14 @@ use util::logger::Logger;
 use routing::network_graph::NetGraphMsgHandler;
 
 use prelude::*;
+use io;
 use alloc::collections::LinkedList;
 use alloc::fmt::Debug;
 use sync::{Arc, Mutex};
 use core::sync::atomic::{AtomicUsize, Ordering};
 use core::{cmp, hash, fmt, mem};
 use core::ops::Deref;
-use std::error;
+#[cfg(feature = "std")] use std::error;
 
 use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hashes::sha256::HashEngine as Sha256Engine;
@@ -230,6 +231,8 @@ impl fmt::Display for PeerHandleError {
                formatter.write_str("Peer Sent Invalid Data")
        }
 }
+
+#[cfg(feature = "std")]
 impl error::Error for PeerHandleError {
        fn description(&self) -> &str {
                "Peer Sent Invalid Data"
@@ -801,7 +804,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref> PeerManager<D
                                                                                peer.pending_read_buffer = [0; 18].to_vec();
                                                                                peer.pending_read_is_header = true;
 
-                                                                               let mut reader = ::std::io::Cursor::new(&msg_data[..]);
+                                                                               let mut reader = io::Cursor::new(&msg_data[..]);
                                                                                let message_result = wire::read(&mut reader);
                                                                                let message = match message_result {
                                                                                        Ok(x) => x,
index 9946cc24a3cb2485597dee37cac9c22d1efa3fa1..bbdb5bfac6bae9270e30bb99434c818020a3d475 100644 (file)
@@ -10,6 +10,7 @@
 //! Further functional tests which test blockchain reorganizations.
 
 use chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor};
+use chain::transaction::OutPoint;
 use chain::{Confirm, Watch};
 use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs};
 use ln::features::InitFeatures;
@@ -20,7 +21,10 @@ use util::test_utils;
 use util::ser::{ReadableArgs, Writeable};
 
 use bitcoin::blockdata::block::{Block, BlockHeader};
+use bitcoin::blockdata::script::Builder;
+use bitcoin::blockdata::opcodes;
 use bitcoin::hash_types::BlockHash;
+use bitcoin::secp256k1::Secp256k1;
 
 use prelude::*;
 use core::mem;
@@ -132,8 +136,9 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
                connect_block(&nodes[1], &block);
 
                // ChannelManager only polls chain::Watch::release_pending_monitor_events when we
-               // probe it for events, so we probe non-message events here (which should still end up empty):
-               assert_eq!(nodes[1].node.get_and_clear_pending_events().len(), 0);
+               // probe it for events, so we probe non-message events here (which should just be the
+               // PaymentForwarded event).
+               expect_payment_forwarded!(nodes[1], Some(1000), true);
        } else {
                // Confirm the timeout tx and check that we fail the HTLC backwards
                let block = Block {
@@ -427,3 +432,112 @@ fn test_set_outpoints_partial_claiming() {
                node_txn.clear();
        }
 }
+
+fn do_test_to_remote_after_local_detection(style: ConnectStyle) {
+       // In previous code, detection of to_remote outputs in a counterparty commitment transaction
+       // was dependent on whether a local commitment transaction had been seen on-chain previously.
+       // This resulted in some edge cases around not being able to generate a SpendableOutput event
+       // after a reorg.
+       //
+       // Here, we test this by first confirming one set of commitment transactions, then
+       // disconnecting them and reconnecting another. We then confirm them and check that the correct
+       // SpendableOutput event is generated.
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       *nodes[0].connect_style.borrow_mut() = style;
+       *nodes[1].connect_style.borrow_mut() = style;
+
+       let (_, _, chan_id, funding_tx) =
+               create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 100_000_000, InitFeatures::known(), InitFeatures::known());
+       let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
+       assert_eq!(funding_outpoint.to_channel_id(), chan_id);
+
+       let remote_txn_a = get_local_commitment_txn!(nodes[0], chan_id);
+       let remote_txn_b = get_local_commitment_txn!(nodes[1], chan_id);
+
+       mine_transaction(&nodes[0], &remote_txn_a[0]);
+       mine_transaction(&nodes[1], &remote_txn_a[0]);
+
+       assert!(nodes[0].node.list_channels().is_empty());
+       check_closed_broadcast!(nodes[0], true);
+       check_added_monitors!(nodes[0], 1);
+       assert!(nodes[1].node.list_channels().is_empty());
+       check_closed_broadcast!(nodes[1], true);
+       check_added_monitors!(nodes[1], 1);
+
+       // Drop transactions broadcasted in response to the first commitment transaction (we have good
+       // test coverage of these things already elsewhere).
+       assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
+       assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
+
+       assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+       assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+
+       disconnect_blocks(&nodes[0], 1);
+       disconnect_blocks(&nodes[1], 1);
+
+       assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+       assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+       assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+       assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+
+       connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
+       connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
+
+       assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+       assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+       assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+       assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+
+       mine_transaction(&nodes[0], &remote_txn_b[0]);
+       mine_transaction(&nodes[1], &remote_txn_b[0]);
+
+       assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+       assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+       assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+       assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+
+       connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
+       connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
+
+       let mut node_a_spendable = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events();
+       assert_eq!(node_a_spendable.len(), 1);
+       if let Event::SpendableOutputs { outputs } = node_a_spendable.pop().unwrap() {
+               assert_eq!(outputs.len(), 1);
+               let spend_tx = nodes[0].keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(),
+                       Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &Secp256k1::new()).unwrap();
+               check_spends!(spend_tx, remote_txn_b[0]);
+       }
+
+       // nodes[1] is waiting for its to_self_delay to expire, which is many more blocks than
+       // ANTI_REORG_DELAY. Instead, walk it back and confirm the original remote_txn_a commitment
+       // again and check that nodes[1] generates a similar spendable output.
+       // Technically a reorg of ANTI_REORG_DELAY violates our assumptions, so this is undefined by
+       // our API spec, but we currently handle this correctly and there's little reason we shouldn't
+       // in the future.
+       assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+       disconnect_blocks(&nodes[1], ANTI_REORG_DELAY);
+       mine_transaction(&nodes[1], &remote_txn_a[0]);
+       connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
+
+       let mut node_b_spendable = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events();
+       assert_eq!(node_b_spendable.len(), 1);
+       if let Event::SpendableOutputs { outputs } = node_b_spendable.pop().unwrap() {
+               assert_eq!(outputs.len(), 1);
+               let spend_tx = nodes[1].keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(),
+                       Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &Secp256k1::new()).unwrap();
+               check_spends!(spend_tx, remote_txn_a[0]);
+       }
+}
+
+#[test]
+fn test_to_remote_after_local_detection() {
+       do_test_to_remote_after_local_detection(ConnectStyle::BestBlockFirst);
+       do_test_to_remote_after_local_detection(ConnectStyle::BestBlockFirstSkippingBlocks);
+       do_test_to_remote_after_local_detection(ConnectStyle::TransactionsFirst);
+       do_test_to_remote_after_local_detection(ConnectStyle::TransactionsFirstSkippingBlocks);
+       do_test_to_remote_after_local_detection(ConnectStyle::FullBlockViaListen);
+}
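Outside the test macros, an application reacting to the `Event::SpendableOutputs` generated above would sweep the descriptors much like the assertions do, via `spend_spendable_outputs` on its `KeysManager`. A minimal sketch, assuming the caller supplies its `KeysManager`, a destination script, and a feerate; the function name and parameters here are illustrative:

```rust
use bitcoin::blockdata::script::Script;
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::secp256k1::Secp256k1;
use lightning::chain::keysinterface::KeysManager;
use lightning::util::events::Event;

// Sweep any SpendableOutputs descriptors to `destination_script`; the caller
// supplies its KeysManager and a feerate in satoshis per 1000 weight units.
fn sweep_spendable_outputs(
    keys_manager: &KeysManager, event: Event, destination_script: Script,
    feerate_sat_per_1000_weight: u32,
) -> Option<Transaction> {
    if let Event::SpendableOutputs { outputs } = event {
        let descriptor_refs: Vec<_> = outputs.iter().collect();
        // Builds a transaction claiming the outputs to our own script, as the
        // test assertions above do with an OP_RETURN script.
        return keys_manager.spend_spendable_outputs(
            &descriptor_refs[..], Vec::new(), destination_script,
            feerate_sat_per_1000_weight, &Secp256k1::new(),
        ).ok();
    }
    None
}
```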
index 0ee280b50e4e72ffc4290333a960a9a09853ae93..e3ff3095219e7abf1efe8ff829ad1d2b7ac65bd0 100644 (file)
@@ -18,6 +18,7 @@
 //!
 //! [BOLT #1]: https://github.com/lightningnetwork/lightning-rfc/blob/master/01-messaging.md
 
+use io;
 use ln::msgs;
 use util::ser::{Readable, Writeable, Writer};
 
@@ -119,7 +120,7 @@ impl ::core::fmt::Display for MessageType {
 /// # Errors
 ///
 /// Returns an error if the message payload could not be decoded as the specified type.
-pub fn read<R: ::std::io::Read>(buffer: &mut R) -> Result<Message, msgs::DecodeError> {
+pub fn read<R: io::Read>(buffer: &mut R) -> Result<Message, msgs::DecodeError> {
        let message_type = <u16 as Readable>::read(buffer)?;
        match message_type {
                msgs::Init::TYPE => {
@@ -218,7 +219,7 @@ pub fn read<R: ::std::io::Read>(buffer: &mut R) -> Result<Message, msgs::DecodeE
 /// # Errors
 ///
 /// Returns an I/O error if the write could not be completed.
-pub fn write<M: Encode + Writeable, W: Writer>(message: &M, buffer: &mut W) -> Result<(), ::std::io::Error> {
+pub fn write<M: Encode + Writeable, W: Writer>(message: &M, buffer: &mut W) -> Result<(), io::Error> {
        M::TYPE.write(buffer)?;
        message.write(buffer)
 }
@@ -361,35 +362,35 @@ mod tests {
        #[test]
        fn read_empty_buffer() {
                let buffer = [];
-               let mut reader = ::std::io::Cursor::new(buffer);
+               let mut reader = io::Cursor::new(buffer);
                assert!(read(&mut reader).is_err());
        }
 
        #[test]
        fn read_incomplete_type() {
                let buffer = &ENCODED_PONG[..1];
-               let mut reader = ::std::io::Cursor::new(buffer);
+               let mut reader = io::Cursor::new(buffer);
                assert!(read(&mut reader).is_err());
        }
 
        #[test]
        fn read_empty_payload() {
                let buffer = &ENCODED_PONG[..2];
-               let mut reader = ::std::io::Cursor::new(buffer);
+               let mut reader = io::Cursor::new(buffer);
                assert!(read(&mut reader).is_err());
        }
 
        #[test]
        fn read_invalid_message() {
                let buffer = &ENCODED_PONG[..4];
-               let mut reader = ::std::io::Cursor::new(buffer);
+               let mut reader = io::Cursor::new(buffer);
                assert!(read(&mut reader).is_err());
        }
 
        #[test]
        fn read_known_message() {
                let buffer = &ENCODED_PONG[..];
-               let mut reader = ::std::io::Cursor::new(buffer);
+               let mut reader = io::Cursor::new(buffer);
                let message = read(&mut reader).unwrap();
                match message {
                        Message::Pong(_) => (),
@@ -400,7 +401,7 @@ mod tests {
        #[test]
        fn read_unknown_message() {
                let buffer = &::core::u16::MAX.to_be_bytes();
-               let mut reader = ::std::io::Cursor::new(buffer);
+               let mut reader = io::Cursor::new(buffer);
                let message = read(&mut reader).unwrap();
                match message {
                        Message::Unknown(MessageType(::core::u16::MAX)) => (),
@@ -426,7 +427,7 @@ mod tests {
                let mut buffer = Vec::new();
                assert!(write(&message, &mut buffer).is_ok());
 
-               let mut reader = ::std::io::Cursor::new(buffer);
+               let mut reader = io::Cursor::new(buffer);
                let decoded_message = read(&mut reader).unwrap();
                match decoded_message {
                        Message::Pong(msgs::Pong { byteslen: 2u16 }) => (),
@@ -464,7 +465,7 @@ mod tests {
        }
 
        fn check_init_msg(buffer: Vec<u8>, expect_unknown: bool) {
-               let mut reader = ::std::io::Cursor::new(buffer);
+               let mut reader = io::Cursor::new(buffer);
                let decoded_msg = read(&mut reader).unwrap();
                match decoded_msg {
                        Message::Init(msgs::Init { features }) => {
@@ -483,7 +484,7 @@ mod tests {
        fn read_lnd_node_announcement() {
                // Taken from lnd v0.9.0-beta.
                let buffer = vec![1, 1, 91, 164, 146, 213, 213, 165, 21, 227, 102, 33, 105, 179, 214, 21, 221, 175, 228, 93, 57, 177, 191, 127, 107, 229, 31, 50, 21, 81, 179, 71, 39, 18, 35, 2, 89, 224, 110, 123, 66, 39, 148, 246, 177, 85, 12, 19, 70, 226, 173, 132, 156, 26, 122, 146, 71, 213, 247, 48, 93, 190, 185, 177, 12, 172, 0, 3, 2, 162, 161, 94, 103, 195, 37, 2, 37, 242, 97, 140, 2, 111, 69, 85, 39, 118, 30, 221, 99, 254, 120, 49, 103, 22, 170, 227, 111, 172, 164, 160, 49, 68, 138, 116, 16, 22, 206, 107, 51, 153, 255, 97, 108, 105, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 1, 172, 21, 0, 2, 38, 7];
-               let mut reader = ::std::io::Cursor::new(buffer);
+               let mut reader = io::Cursor::new(buffer);
                let decoded_msg = read(&mut reader).unwrap();
                match decoded_msg {
                        Message::NodeAnnouncement(msgs::NodeAnnouncement { contents: msgs::UnsignedNodeAnnouncement { features, ..}, ..}) => {
@@ -500,7 +501,7 @@ mod tests {
        fn read_lnd_chan_announcement() {
                // Taken from lnd v0.9.0-beta.
                let buffer = vec![1, 0, 82, 238, 153, 33, 128, 87, 215, 2, 28, 241, 140, 250, 98, 255, 56, 5, 79, 240, 214, 231, 172, 35, 240, 171, 44, 9, 78, 91, 8, 193, 102, 5, 17, 178, 142, 106, 180, 183, 46, 38, 217, 212, 25, 236, 69, 47, 92, 217, 181, 221, 161, 205, 121, 201, 99, 38, 158, 216, 186, 193, 230, 86, 222, 6, 206, 67, 22, 255, 137, 212, 141, 161, 62, 134, 76, 48, 241, 54, 50, 167, 187, 247, 73, 27, 74, 1, 129, 185, 197, 153, 38, 90, 255, 138, 39, 161, 102, 172, 213, 74, 107, 88, 150, 90, 0, 49, 104, 7, 182, 184, 194, 219, 181, 172, 8, 245, 65, 226, 19, 228, 101, 145, 25, 159, 52, 31, 58, 93, 53, 59, 218, 91, 37, 84, 103, 17, 74, 133, 33, 35, 2, 203, 101, 73, 19, 94, 175, 122, 46, 224, 47, 168, 128, 128, 25, 26, 25, 214, 52, 247, 43, 241, 117, 52, 206, 94, 135, 156, 52, 164, 143, 234, 58, 185, 50, 185, 140, 198, 174, 71, 65, 18, 105, 70, 131, 172, 137, 0, 164, 51, 215, 143, 117, 119, 217, 241, 197, 177, 227, 227, 170, 199, 114, 7, 218, 12, 107, 30, 191, 236, 203, 21, 61, 242, 48, 192, 90, 233, 200, 199, 111, 162, 68, 234, 54, 219, 1, 233, 66, 5, 82, 74, 84, 211, 95, 199, 245, 202, 89, 223, 102, 124, 62, 166, 253, 253, 90, 180, 118, 21, 61, 110, 37, 5, 96, 167, 0, 0, 6, 34, 110, 70, 17, 26, 11, 89, 202, 175, 18, 96, 67, 235, 91, 191, 40, 195, 79, 58, 94, 51, 42, 31, 199, 178, 183, 60, 241, 136, 145, 15, 0, 2, 65, 0, 0, 1, 0, 0, 2, 37, 242, 97, 140, 2, 111, 69, 85, 39, 118, 30, 221, 99, 254, 120, 49, 103, 22, 170, 227, 111, 172, 164, 160, 49, 68, 138, 116, 16, 22, 206, 107, 3, 54, 61, 144, 88, 171, 247, 136, 208, 99, 9, 135, 37, 201, 178, 253, 136, 0, 185, 235, 68, 160, 106, 110, 12, 46, 21, 125, 204, 18, 75, 234, 16, 3, 42, 171, 28, 52, 224, 11, 30, 30, 253, 156, 148, 175, 203, 121, 250, 111, 122, 195, 84, 122, 77, 183, 56, 135, 101, 88, 41, 60, 191, 99, 232, 85, 2, 36, 17, 156, 11, 8, 12, 189, 177, 68, 88, 28, 15, 207, 21, 179, 151, 56, 226, 158, 148, 3, 120, 113, 177, 243, 184, 17, 173, 37, 46, 222, 16];
-               let mut reader = ::std::io::Cursor::new(buffer);
+               let mut reader = io::Cursor::new(buffer);
                let decoded_msg = read(&mut reader).unwrap();
                match decoded_msg {
                        Message::ChannelAnnouncement(msgs::ChannelAnnouncement { contents: msgs::UnsignedChannelAnnouncement { features, ..}, ..}) => {
index 9e4813e9155926fac85b20d307af1bb9ac8a1dc8..486b71578f3fde8775e1d7fe86bd0283c5ef7d8e 100644 (file)
@@ -32,6 +32,7 @@ use util::logger::{Logger, Level};
 use util::events::{MessageSendEvent, MessageSendEventsProvider};
 use util::scid_utils::{block_from_scid, scid_from_parts, MAX_SCID_BLOCK};
 
+use io;
 use prelude::*;
 use alloc::collections::{BTreeMap, btree_map::Entry as BtreeEntry};
 use core::{cmp, fmt};
@@ -611,7 +612,7 @@ const SERIALIZATION_VERSION: u8 = 1;
 const MIN_SERIALIZATION_VERSION: u8 = 1;
 
 impl Writeable for NetworkGraph {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
 
                self.genesis_hash.write(writer)?;
@@ -632,7 +633,7 @@ impl Writeable for NetworkGraph {
 }
 
 impl Readable for NetworkGraph {
-       fn read<R: ::std::io::Read>(reader: &mut R) -> Result<NetworkGraph, DecodeError> {
+       fn read<R: io::Read>(reader: &mut R) -> Result<NetworkGraph, DecodeError> {
                let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
 
                let genesis_hash: BlockHash = Readable::read(reader)?;
@@ -1087,6 +1088,7 @@ mod tests {
        use bitcoin::secp256k1::key::{PublicKey, SecretKey};
        use bitcoin::secp256k1::{All, Secp256k1};
 
+       use io;
        use prelude::*;
        use sync::Arc;
 
@@ -1996,7 +1998,7 @@ mod tests {
                assert!(!network.get_nodes().is_empty());
                assert!(!network.get_channels().is_empty());
                network.write(&mut w).unwrap();
-               assert!(<NetworkGraph>::read(&mut ::std::io::Cursor::new(&w.0)).unwrap() == *network);
+               assert!(<NetworkGraph>::read(&mut io::Cursor::new(&w.0)).unwrap() == *network);
        }
 
        #[test]
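The `Writeable`/`Readable` impls above make the routing graph persistable; a minimal sketch of writing it to disk and reading it back, assuming a build with the default `std` feature (the file path and error handling are illustrative):

```rust
use std::fs;
use std::io::Cursor;

use lightning::routing::network_graph::NetworkGraph;
use lightning::util::ser::{Readable, Writeable};

fn save_and_reload(graph: &NetworkGraph) -> NetworkGraph {
    // `encode()` is provided by `Writeable` and returns the serialized bytes.
    fs::write("network_graph.bin", graph.encode()).expect("persisting the graph");
    let bytes = fs::read("network_graph.bin").expect("reading the graph back");
    NetworkGraph::read(&mut Cursor::new(bytes)).expect("a graph we just wrote should parse")
}
```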
index eda25572a22710d3df0220c411514b35a844deee..13356cc1c74334193fed69be42a05b718d2ab89d 100644 (file)
@@ -21,6 +21,7 @@ use routing::network_graph::{NetworkGraph, RoutingFees};
 use util::ser::{Writeable, Readable};
 use util::logger::Logger;
 
+use io;
 use prelude::*;
 use alloc::collections::BinaryHeap;
 use core::cmp;
@@ -74,7 +75,7 @@ const SERIALIZATION_VERSION: u8 = 1;
 const MIN_SERIALIZATION_VERSION: u8 = 1;
 
 impl Writeable for Route {
-       fn write<W: ::util::ser::Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: ::util::ser::Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
                (self.paths.len() as u64).write(writer)?;
                for hops in self.paths.iter() {
@@ -89,7 +90,7 @@ impl Writeable for Route {
 }
 
 impl Readable for Route {
-       fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Route, DecodeError> {
+       fn read<R: io::Read>(reader: &mut R) -> Result<Route, DecodeError> {
                let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
                let path_count: u64 = Readable::read(reader)?;
                let mut paths = Vec::with_capacity(cmp::min(path_count, 128) as usize);
@@ -3830,7 +3831,7 @@ mod tests {
                }
        }
 
-       #[cfg(not(feature = "no_std"))]
+       #[cfg(not(feature = "no-std"))]
        pub(super) fn random_init_seed() -> u64 {
                // Because the default HashMap in std pulls OS randomness, we can use it as a (bad) RNG.
                use core::hash::{BuildHasher, Hasher};
@@ -3838,11 +3839,11 @@ mod tests {
                println!("Using seed of {}", seed);
                seed
        }
-       #[cfg(not(feature = "no_std"))]
+       #[cfg(not(feature = "no-std"))]
        use util::ser::Readable;
 
        #[test]
-       #[cfg(not(feature = "no_std"))]
+       #[cfg(not(feature = "no-std"))]
        fn generate_routes() {
                let mut d = match super::test_utils::get_route_file() {
                        Ok(f) => f,
@@ -3870,7 +3871,7 @@ mod tests {
        }
 
        #[test]
-       #[cfg(not(feature = "no_std"))]
+       #[cfg(not(feature = "no-std"))]
        fn generate_routes_mpp() {
                let mut d = match super::test_utils::get_route_file() {
                        Ok(f) => f,
@@ -3898,7 +3899,7 @@ mod tests {
        }
 }
 
-#[cfg(all(test, not(feature = "no_std")))]
+#[cfg(all(test, not(feature = "no-std")))]
 pub(crate) mod test_utils {
        use std::fs::File;
        /// Tries to open a network graph file, or panics with a URL to fetch it.
@@ -3925,7 +3926,7 @@ pub(crate) mod test_utils {
        }
 }
 
-#[cfg(all(test, feature = "unstable", not(feature = "no_std")))]
+#[cfg(all(test, feature = "unstable", not(feature = "no-std")))]
 mod benches {
        use super::*;
        use util::logger::{Logger, Record};
index cbf08f46c226a568d1bebc8c368e74d518624433..508ecd635c45d007b6fb6de37b57d816146b3750 100644 (file)
@@ -9,7 +9,7 @@
 // You may not use this file except in accordance with one or both of these
 // licenses.
 
-use std::io;
+use io;
 
 #[cfg(not(feature = "fuzztarget"))]
 mod real_chacha {
index d7ddc38b3ddfee2641ba414108ca5b541878f503..baa9e7cd16d98b05be00abcf5d7ebc46a75cbbf8 100644 (file)
@@ -11,6 +11,7 @@ use ln::chan_utils::{HTLCOutputInCommitment, ChannelPublicKeys, HolderCommitment
 use ln::{chan_utils, msgs};
 use chain::keysinterface::{Sign, InMemorySigner, BaseSign};
 
+use io;
 use prelude::*;
 use core::cmp;
 use sync::{Mutex, Arc};
@@ -22,7 +23,7 @@ use bitcoin::secp256k1;
 use bitcoin::secp256k1::key::{SecretKey, PublicKey};
 use bitcoin::secp256k1::{Secp256k1, Signature};
 use util::ser::{Writeable, Writer, Readable};
-use std::io::Error;
+use io::Error;
 use ln::msgs::DecodeError;
 
 /// Initial value for revoked commitment downward counter
@@ -181,7 +182,7 @@ impl Writeable for EnforcingSigner {
 }
 
 impl Readable for EnforcingSigner {
-       fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+       fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
                let inner = Readable::read(reader)?;
                let last_commitment_number = Readable::read(reader)?;
                Ok(EnforcingSigner {
index 876dfe6abb551f359a6c67c60fe05c1d104b81ed..1780483cb8b9e01bcab8d5f0c18e812522d27a7f 100644 (file)
@@ -23,6 +23,7 @@ use bitcoin::blockdata::script::Script;
 
 use bitcoin::secp256k1::key::PublicKey;
 
+use io;
 use prelude::*;
 use core::time::Duration;
 use core::ops::Deref;
@@ -150,10 +151,31 @@ pub enum Event {
                /// The outputs which you should store as spendable by you.
                outputs: Vec<SpendableOutputDescriptor>,
        },
+       /// This event is generated when a payment has been successfully forwarded through us and a
+       /// forwarding fee has been earned.
+       PaymentForwarded {
+               /// The fee, in milli-satoshis, which was earned as a result of the payment.
+               ///
+               /// Note that if we force-closed the channel over which we forwarded an HTLC while the HTLC
+               /// was pending, the amount the next hop claimed will have been rounded down to the nearest
+               /// whole satoshi. Thus, the fee calculated here may be higher than expected as we still
+               /// claimed the full value in millisatoshis from the source. In this case,
+               /// `claim_from_onchain_tx` will be set.
+               ///
+               /// If the channel which sent us the payment has been force-closed, we will claim the funds
+               /// via an on-chain transaction. In that case we do not yet know the on-chain transaction
+               /// fees which we will spend and will instead set this to `None`. It is possible that duplicate
+               /// `PaymentForwarded` events are generated for the same payment iff `fee_earned_msat` is
+               /// `None`.
+               fee_earned_msat: Option<u64>,
+               /// If this is `true`, the forwarded HTLC was claimed by our counterparty via an on-chain
+               /// transaction.
+               claim_from_onchain_tx: bool,
+       },
 }
 
 impl Writeable for Event {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                match self {
                        &Event::FundingGenerationReady { .. } => {
                                0u8.write(writer)?;
@@ -217,12 +239,19 @@ impl Writeable for Event {
                                        (0, VecWriteWrapper(outputs), required),
                                });
                        },
+                       &Event::PaymentForwarded { fee_earned_msat, claim_from_onchain_tx } => {
+                               7u8.write(writer)?;
+                               write_tlv_fields!(writer, {
+                                       (0, fee_earned_msat, option),
+                                       (2, claim_from_onchain_tx, required),
+                               });
+                       },
                }
                Ok(())
        }
 }
 impl MaybeReadable for Event {
-       fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Option<Self>, msgs::DecodeError> {
+       fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, msgs::DecodeError> {
                match Readable::read(reader)? {
                        0u8 => Ok(None),
                        1u8 => {
@@ -312,6 +341,20 @@ impl MaybeReadable for Event {
                                };
                                f()
                        },
+                       7u8 => {
+                               let f = || {
+                                       let mut fee_earned_msat = None;
+                                       let mut claim_from_onchain_tx = false;
+                                       read_tlv_fields!(reader, {
+                                               (0, fee_earned_msat, option),
+                                               (2, claim_from_onchain_tx, required),
+                                       });
+                                       Ok(Some(Event::PaymentForwarded { fee_earned_msat, claim_from_onchain_tx }))
+                               };
+                               f()
+                       },
+                       // Versions prior to 0.0.100 did not ignore odd types, instead returning InvalidValue.
+                       x if x % 2 == 1 => Ok(None),
                        _ => Err(msgs::DecodeError::InvalidValue)
                }
        }
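The new `PaymentForwarded` variant is consumed like any other `Event`; a minimal handling sketch (how the events are drained from the node is left to the surrounding application):

```rust
use lightning::util::events::Event;

fn log_forwarding_events(events: Vec<Event>) {
    for event in events {
        match event {
            Event::PaymentForwarded { fee_earned_msat, claim_from_onchain_tx } => {
                match fee_earned_msat {
                    Some(fee) => println!("forwarded a payment, earned {} msat", fee),
                    // The fee is not yet known when the claim happens on-chain.
                    None => println!("forwarded a payment, fee not yet known"),
                }
                if claim_from_onchain_tx {
                    println!("counterparty claimed the forwarded HTLC on-chain");
                }
            },
            _ => { /* other event kinds are handled elsewhere */ },
        }
    }
}
```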
index fe331f6bf4d748cb7f74a22607b87f0622a03589..b27cf4b348c3d98fc805dd7478dbe640910d80f4 100644 (file)
@@ -11,7 +11,8 @@
 //! as ChannelsManagers and ChannelMonitors.
 
 use prelude::*;
-use std::io::{Read, Write};
+use io::{self, Read, Write};
+use io_extras::{copy, sink};
 use core::hash::Hash;
 use sync::Mutex;
 use core::cmp;
@@ -42,7 +43,7 @@ pub const MAX_BUF_SIZE: usize = 64 * 1024;
 /// (C-not exported) as we only export serialization to/from byte arrays instead
 pub trait Writer {
        /// Writes the given buf out. See std::io::Write::write_all for more
-       fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error>;
+       fn write_all(&mut self, buf: &[u8]) -> Result<(), io::Error>;
        /// Hints that data of the given size is about to be written. This may not always be called
        /// prior to data being written and may be safely ignored.
        fn size_hint(&mut self, size: usize);
@@ -50,8 +51,8 @@ pub trait Writer {
 
 impl<W: Write> Writer for W {
        #[inline]
-       fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
-               <Self as ::std::io::Write>::write_all(self, buf)
+       fn write_all(&mut self, buf: &[u8]) -> Result<(), io::Error> {
+               <Self as io::Write>::write_all(self, buf)
        }
        #[inline]
        fn size_hint(&mut self, _size: usize) { }
@@ -60,16 +61,16 @@ impl<W: Write> Writer for W {
 pub(crate) struct WriterWriteAdaptor<'a, W: Writer + 'a>(pub &'a mut W);
 impl<'a, W: Writer + 'a> Write for WriterWriteAdaptor<'a, W> {
        #[inline]
-       fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
+       fn write_all(&mut self, buf: &[u8]) -> Result<(), io::Error> {
                self.0.write_all(buf)
        }
        #[inline]
-       fn write(&mut self, buf: &[u8]) -> Result<usize, ::std::io::Error> {
+       fn write(&mut self, buf: &[u8]) -> Result<usize, io::Error> {
                self.0.write_all(buf)?;
                Ok(buf.len())
        }
        #[inline]
-       fn flush(&mut self) -> Result<(), ::std::io::Error> {
+       fn flush(&mut self) -> Result<(), io::Error> {
                Ok(())
        }
 }
@@ -77,7 +78,7 @@ impl<'a, W: Writer + 'a> Write for WriterWriteAdaptor<'a, W> {
 pub(crate) struct VecWriter(pub Vec<u8>);
 impl Writer for VecWriter {
        #[inline]
-       fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
+       fn write_all(&mut self, buf: &[u8]) -> Result<(), io::Error> {
                self.0.extend_from_slice(buf);
                Ok(())
        }
@@ -92,7 +93,7 @@ impl Writer for VecWriter {
 pub(crate) struct LengthCalculatingWriter(pub usize);
 impl Writer for LengthCalculatingWriter {
        #[inline]
-       fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
+       fn write_all(&mut self, buf: &[u8]) -> Result<(), io::Error> {
                self.0 += buf.len();
                Ok(())
        }
@@ -119,7 +120,7 @@ impl<R: Read> FixedLengthReader<R> {
 
        #[inline]
        pub fn eat_remaining(&mut self) -> Result<(), DecodeError> {
-               ::std::io::copy(self, &mut ::std::io::sink()).unwrap();
+               copy(self, &mut sink()).unwrap();
                if self.bytes_read != self.total_bytes {
                        Err(DecodeError::ShortRead)
                } else {
@@ -129,7 +130,7 @@ impl<R: Read> FixedLengthReader<R> {
 }
 impl<R: Read> Read for FixedLengthReader<R> {
        #[inline]
-       fn read(&mut self, dest: &mut [u8]) -> Result<usize, ::std::io::Error> {
+       fn read(&mut self, dest: &mut [u8]) -> Result<usize, io::Error> {
                if self.total_bytes == self.bytes_read {
                        Ok(0)
                } else {
@@ -158,7 +159,7 @@ impl<R: Read> ReadTrackingReader<R> {
 }
 impl<R: Read> Read for ReadTrackingReader<R> {
        #[inline]
-       fn read(&mut self, dest: &mut [u8]) -> Result<usize, ::std::io::Error> {
+       fn read(&mut self, dest: &mut [u8]) -> Result<usize, io::Error> {
                match self.read.read(dest) {
                        Ok(0) => Ok(0),
                        Ok(len) => {
@@ -175,7 +176,7 @@ impl<R: Read> Read for ReadTrackingReader<R> {
 /// (C-not exported) as we only export serialization to/from byte arrays instead
 pub trait Writeable {
        /// Writes self out to the given Writer
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error>;
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error>;
 
        /// Writes self out to a Vec<u8>
        fn encode(&self) -> Vec<u8> {
@@ -206,7 +207,7 @@ pub trait Writeable {
 }
 
 impl<'a, T: Writeable> Writeable for &'a T {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> { (*self).write(writer) }
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> { (*self).write(writer) }
 }
 
 /// A trait that various rust-lightning types implement allowing them to be read in from a Read
@@ -252,7 +253,7 @@ impl<T: Readable> Readable for OptionDeserWrapper<T> {
 pub(crate) struct VecWriteWrapper<'a, T: Writeable>(pub &'a Vec<T>);
 impl<'a, T: Writeable> Writeable for VecWriteWrapper<'a, T> {
        #[inline]
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                for ref v in self.0.iter() {
                        v.write(writer)?;
                }
@@ -283,7 +284,7 @@ impl<T: Readable> Readable for VecReadWrapper<T> {
 pub(crate) struct U48(pub u64);
 impl Writeable for U48 {
        #[inline]
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                writer.write_all(&be48_to_array(self.0))
        }
 }
@@ -306,7 +307,7 @@ impl Readable for U48 {
 pub(crate) struct BigSize(pub u64);
 impl Writeable for BigSize {
        #[inline]
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                match self.0 {
                        0...0xFC => {
                                (self.0 as u8).write(writer)
@@ -370,13 +371,13 @@ macro_rules! impl_writeable_primitive {
        ($val_type:ty, $len: expr) => {
                impl Writeable for $val_type {
                        #[inline]
-                       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+                       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                                writer.write_all(&self.to_be_bytes())
                        }
                }
                impl Writeable for HighZeroBytesDroppedVarInt<$val_type> {
                        #[inline]
-                       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+                       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                                // Skip any full leading 0 bytes when writing (in BE):
                                writer.write_all(&self.0.to_be_bytes()[(self.0.leading_zeros()/8) as usize..$len])
                        }
@@ -424,7 +425,7 @@ impl_writeable_primitive!(u16, 2);
 
 impl Writeable for u8 {
        #[inline]
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                writer.write_all(&[*self])
        }
 }
@@ -439,7 +440,7 @@ impl Readable for u8 {
 
 impl Writeable for bool {
        #[inline]
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                writer.write_all(&[if *self {1} else {0}])
        }
 }
@@ -461,7 +462,7 @@ macro_rules! impl_array {
                impl Writeable for [u8; $size]
                {
                        #[inline]
-                       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+                       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                                w.write_all(self)
                        }
                }
@@ -494,7 +495,7 @@ impl<K, V> Writeable for HashMap<K, V>
              V: Writeable
 {
        #[inline]
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
        (self.len() as u16).write(w)?;
                for (key, value) in self.iter() {
                        key.write(w)?;
@@ -522,7 +523,7 @@ impl<K, V> Readable for HashMap<K, V>
 // Vectors
 impl Writeable for Vec<u8> {
        #[inline]
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                (self.len() as u16).write(w)?;
                w.write_all(&self)
        }
@@ -540,7 +541,7 @@ impl Readable for Vec<u8> {
 }
 impl Writeable for Vec<Signature> {
        #[inline]
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                (self.len() as u16).write(w)?;
                for e in self.iter() {
                        e.write(w)?;
@@ -566,7 +567,7 @@ impl Readable for Vec<Signature> {
 }
 
 impl Writeable for Script {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                (self.len() as u16).write(w)?;
                w.write_all(self.as_bytes())
        }
@@ -582,7 +583,7 @@ impl Readable for Script {
 }
 
 impl Writeable for PublicKey {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                self.serialize().write(w)
        }
        #[inline]
@@ -602,7 +603,7 @@ impl Readable for PublicKey {
 }
 
 impl Writeable for SecretKey {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                let mut ser = [0; SECRET_KEY_SIZE];
                ser.copy_from_slice(&self[..]);
                ser.write(w)
@@ -624,7 +625,7 @@ impl Readable for SecretKey {
 }
 
 impl Writeable for Sha256dHash {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                w.write_all(&self[..])
        }
 }
@@ -639,7 +640,7 @@ impl Readable for Sha256dHash {
 }
 
 impl Writeable for Signature {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                self.serialize_compact().write(w)
        }
        #[inline]
@@ -659,7 +660,7 @@ impl Readable for Signature {
 }
 
 impl Writeable for PaymentPreimage {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                self.0.write(w)
        }
 }
@@ -672,7 +673,7 @@ impl Readable for PaymentPreimage {
 }
 
 impl Writeable for PaymentHash {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                self.0.write(w)
        }
 }
@@ -685,7 +686,7 @@ impl Readable for PaymentHash {
 }
 
 impl Writeable for PaymentSecret {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                self.0.write(w)
        }
 }
@@ -698,7 +699,7 @@ impl Readable for PaymentSecret {
 }
 
 impl<T: Writeable> Writeable for Box<T> {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                T::write(&**self, w)
        }
 }
@@ -710,7 +711,7 @@ impl<T: Readable> Readable for Box<T> {
 }
 
 impl<T: Writeable> Writeable for Option<T> {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                match *self {
                        None => 0u8.write(w)?,
                        Some(ref data) => {
@@ -736,7 +737,7 @@ impl<T: Readable> Readable for Option<T>
 }
 
 impl Writeable for Txid {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                w.write_all(&self[..])
        }
 }
@@ -751,7 +752,7 @@ impl Readable for Txid {
 }
 
 impl Writeable for BlockHash {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                w.write_all(&self[..])
        }
 }
@@ -766,7 +767,7 @@ impl Readable for BlockHash {
 }
 
 impl Writeable for OutPoint {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                self.txid.write(w)?;
                self.vout.write(w)?;
                Ok(())
@@ -787,7 +788,7 @@ impl Readable for OutPoint {
 macro_rules! impl_consensus_ser {
        ($bitcoin_type: ty) => {
                impl Writeable for $bitcoin_type {
-                       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+                       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                                match self.consensus_encode(WriterWriteAdaptor(writer)) {
                                        Ok(_) => Ok(()),
                                        Err(e) => Err(e),
@@ -799,7 +800,7 @@ macro_rules! impl_consensus_ser {
                        fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
                                match consensus::encode::Decodable::consensus_decode(r) {
                                        Ok(t) => Ok(t),
-                                       Err(consensus::encode::Error::Io(ref e)) if e.kind() == ::std::io::ErrorKind::UnexpectedEof => Err(DecodeError::ShortRead),
+                                       Err(consensus::encode::Error::Io(ref e)) if e.kind() == io::ErrorKind::UnexpectedEof => Err(DecodeError::ShortRead),
                                        Err(consensus::encode::Error::Io(e)) => Err(DecodeError::Io(e.kind())),
                                        Err(_) => Err(DecodeError::InvalidValue),
                                }
@@ -817,7 +818,7 @@ impl<T: Readable> Readable for Mutex<T> {
        }
 }
 impl<T: Writeable> Writeable for Mutex<T> {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                self.lock().unwrap().write(w)
        }
 }
@@ -830,7 +831,7 @@ impl<A: Readable, B: Readable> Readable for (A, B) {
        }
 }
 impl<A: Writeable, B: Writeable> Writeable for (A, B) {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                self.0.write(w)?;
                self.1.write(w)
        }
@@ -845,7 +846,7 @@ impl<A: Readable, B: Readable, C: Readable> Readable for (A, B, C) {
        }
 }
 impl<A: Writeable, B: Writeable, C: Writeable> Writeable for (A, B, C) {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
                self.0.write(w)?;
                self.1.write(w)?;
                self.2.write(w)
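The `Writer`/`Writeable`/`Readable` traits above are what every serialized type in the crate implements; a minimal sketch of a hand-rolled impl for a hypothetical two-field struct, written as if it sat next to the impls above so the same imports (`io`, `Read`, `DecodeError`) are in scope:

```rust
// Hypothetical struct, serialized field-by-field with the traits defined above.
pub struct ForwardStats {
    pub htlcs_forwarded: u64,
    pub fees_earned_msat: u64,
}

impl Writeable for ForwardStats {
    fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
        self.htlcs_forwarded.write(w)?;
        self.fees_earned_msat.write(w)
    }
}

impl Readable for ForwardStats {
    fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
        Ok(ForwardStats {
            htlcs_forwarded: Readable::read(r)?,
            fees_earned_msat: Readable::read(r)?,
        })
    }
}
```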
index 86149d22f9ebf4b679909527bfb3d950adb2fe1d..5d5171adbb4401c1dd1fb5faf3b1f8afab919db2 100644 (file)
@@ -93,7 +93,7 @@ macro_rules! check_tlv_order {
                #[allow(unused_comparisons)] // Note that $type may be 0 making the second comparison always true
                let invalid_order = ($last_seen_type.is_none() || $last_seen_type.unwrap() < $type) && $typ.0 > $type;
                if invalid_order {
-                       Err(DecodeError::InvalidValue)?
+                       return Err(DecodeError::InvalidValue);
                }
        }};
        ($last_seen_type: expr, $typ: expr, $type: expr, option) => {{
@@ -109,7 +109,7 @@ macro_rules! check_missing_tlv {
                #[allow(unused_comparisons)] // Note that $type may be 0 making the second comparison always true
                let missing_req_type = $last_seen_type.is_none() || $last_seen_type.unwrap() < $type;
                if missing_req_type {
-                       Err(DecodeError::InvalidValue)?
+                       return Err(DecodeError::InvalidValue);
                }
        }};
        ($last_seen_type: expr, $type: expr, vec_type) => {{
@@ -149,12 +149,12 @@ macro_rules! decode_tlv_stream {
                                match ser::Readable::read(&mut tracking_reader) {
                                        Err(DecodeError::ShortRead) => {
                                                if !tracking_reader.have_read {
-                                                       break 'tlv_read
+                                                       break 'tlv_read;
                                                } else {
-                                                       Err(DecodeError::ShortRead)?
+                                                       return Err(DecodeError::ShortRead);
                                                }
                                        },
-                                       Err(e) => Err(e)?,
+                                       Err(e) => return Err(e),
                                        Ok(t) => t,
                                }
                        };
@@ -162,7 +162,7 @@ macro_rules! decode_tlv_stream {
                        // Types must be unique and monotonically increasing:
                        match last_seen_type {
                                Some(t) if typ.0 <= t => {
-                                       Err(DecodeError::InvalidValue)?
+                                       return Err(DecodeError::InvalidValue);
                                },
                                _ => {},
                        }
@@ -180,11 +180,11 @@ macro_rules! decode_tlv_stream {
                                        decode_tlv!(s, $field, $fieldty);
                                        if s.bytes_remain() {
                                                s.eat_remaining()?; // Return ShortRead if there's actually not enough bytes
-                                               Err(DecodeError::InvalidValue)?
+                                               return Err(DecodeError::InvalidValue);
                                        }
                                },)*
                                x if x % 2 == 0 => {
-                                       Err(DecodeError::UnknownRequiredFeature)?
+                                       return Err(DecodeError::UnknownRequiredFeature);
                                },
                                _ => {},
                        }
@@ -200,7 +200,7 @@ macro_rules! decode_tlv_stream {
 macro_rules! impl_writeable {
        ($st:ident, $len: expr, {$($field:ident),*}) => {
                impl ::util::ser::Writeable for $st {
-                       fn write<W: ::util::ser::Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+                       fn write<W: ::util::ser::Writer>(&self, w: &mut W) -> Result<(), $crate::io::Error> {
                                if $len != 0 {
                                        w.size_hint($len);
                                }
@@ -235,7 +235,7 @@ macro_rules! impl_writeable {
                }
 
                impl ::util::ser::Readable for $st {
-                       fn read<R: ::std::io::Read>(r: &mut R) -> Result<Self, ::ln::msgs::DecodeError> {
+                       fn read<R: $crate::io::Read>(r: &mut R) -> Result<Self, ::ln::msgs::DecodeError> {
                                Ok(Self {
                                        $($field: ::util::ser::Readable::read(r)?),*
                                })
@@ -246,7 +246,7 @@ macro_rules! impl_writeable {
 macro_rules! impl_writeable_len_match {
        ($struct: ident, $cmp: tt, ($calc_len: expr), {$({$match: pat, $length: expr}),*}, {$($field:ident),*}) => {
                impl Writeable for $struct {
-                       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+                       fn write<W: Writer>(&self, w: &mut W) -> Result<(), $crate::io::Error> {
                                let len = match *self {
                                        $($match => $length,)*
                                };
@@ -282,7 +282,7 @@ macro_rules! impl_writeable_len_match {
                }
 
                impl ::util::ser::Readable for $struct {
-                       fn read<R: ::std::io::Read>(r: &mut R) -> Result<Self, DecodeError> {
+                       fn read<R: $crate::io::Read>(r: &mut R) -> Result<Self, DecodeError> {
                                Ok(Self {
                                        $($field: Readable::read(r)?),*
                                })
@@ -387,7 +387,7 @@ macro_rules! init_tlv_field_var {
 macro_rules! impl_writeable_tlv_based {
        ($st: ident, {$(($type: expr, $field: ident, $fieldty: ident)),* $(,)*}) => {
                impl ::util::ser::Writeable for $st {
-                       fn write<W: ::util::ser::Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+                       fn write<W: ::util::ser::Writer>(&self, writer: &mut W) -> Result<(), $crate::io::Error> {
                                write_tlv_fields!(writer, {
                                        $(($type, self.$field, $fieldty)),*
                                });
@@ -412,7 +412,7 @@ macro_rules! impl_writeable_tlv_based {
                }
 
                impl ::util::ser::Readable for $st {
-                       fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, ::ln::msgs::DecodeError> {
+                       fn read<R: $crate::io::Read>(reader: &mut R) -> Result<Self, ::ln::msgs::DecodeError> {
                                $(
                                        init_tlv_field_var!($field, $fieldty);
                                )*
@@ -445,7 +445,7 @@ macro_rules! impl_writeable_tlv_based_enum {
        ),* $(,)*;
        $(($tuple_variant_id: expr, $tuple_variant_name: ident)),*  $(,)*) => {
                impl ::util::ser::Writeable for $st {
-                       fn write<W: ::util::ser::Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+                       fn write<W: ::util::ser::Writer>(&self, writer: &mut W) -> Result<(), $crate::io::Error> {
                                match self {
                                        $($st::$variant_name { $(ref $field),* } => {
                                                let id: u8 = $variant_id;
@@ -465,7 +465,7 @@ macro_rules! impl_writeable_tlv_based_enum {
                }
 
                impl ::util::ser::Readable for $st {
-                       fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, ::ln::msgs::DecodeError> {
+                       fn read<R: $crate::io::Read>(reader: &mut R) -> Result<Self, ::ln::msgs::DecodeError> {
                                let id: u8 = ::util::ser::Readable::read(reader)?;
                                match id {
                                        $($variant_id => {
@@ -490,7 +490,7 @@ macro_rules! impl_writeable_tlv_based_enum {
                                                Ok($st::$tuple_variant_name(Readable::read(reader)?))
                                        }),*
                                        _ => {
-                                               Err(DecodeError::UnknownRequiredFeature)?
+                                               Err(DecodeError::UnknownRequiredFeature)
                                        },
                                }
                        }
@@ -500,8 +500,8 @@ macro_rules! impl_writeable_tlv_based_enum {
 
 #[cfg(test)]
 mod tests {
+       use io::{self, Cursor};
        use prelude::*;
-       use std::io::Cursor;
        use ln::msgs::DecodeError;
        use util::ser::{Writeable, HighZeroBytesDroppedVarInt, VecWriter};
        use bitcoin::secp256k1::PublicKey;
@@ -685,7 +685,7 @@ mod tests {
                do_test!(concat!("fd00fe", "02", "0226"), None, None, None, Some(550));
        }
 
-       fn do_simple_test_tlv_write() -> Result<(), ::std::io::Error> {
+       fn do_simple_test_tlv_write() -> Result<(), io::Error> {
                let mut stream = VecWriter(Vec::new());
 
                stream.0.clear();
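`impl_writeable_tlv_based!` generates both the `Writeable` and `Readable` impls from a TLV field list; a minimal usage sketch on a hypothetical struct (the type numbers and field names are illustrative):

```rust
// Hypothetical struct persisted as a TLV stream: even type numbers must be
// understood by readers, while `option` fields may be omitted on the wire.
pub struct ForwardRecord {
    pub amount_msat: u64,
    pub fee_msat: Option<u64>,
}

impl_writeable_tlv_based!(ForwardRecord, {
    (0, amount_msat, required),
    (2, fee_msat, option),
});
```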
index fbd5a26d9f327de34bf60ce0ed8d3cc35ff56406..a9a10451b2c1dd0825a2f985e13075c45b759961 100644 (file)
@@ -37,6 +37,7 @@ use bitcoin::secp256k1::recovery::RecoverableSignature;
 
 use regex;
 
+use io;
 use prelude::*;
 use core::time::Duration;
 use sync::{Mutex, Arc};
@@ -46,7 +47,7 @@ use chain::keysinterface::InMemorySigner;
 
 pub struct TestVecWriter(pub Vec<u8>);
 impl Writer for TestVecWriter {
-       fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
+       fn write_all(&mut self, buf: &[u8]) -> Result<(), io::Error> {
                self.0.extend_from_slice(buf);
                Ok(())
        }
@@ -75,7 +76,7 @@ impl keysinterface::KeysInterface for OnlyReadsKeysInterface {
        fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
 
        fn read_chan_signer(&self, reader: &[u8]) -> Result<Self::Signer, msgs::DecodeError> {
-               EnforcingSigner::read(&mut std::io::Cursor::new(reader))
+               EnforcingSigner::read(&mut io::Cursor::new(reader))
        }
        fn sign_invoice(&self, _invoice_preimage: Vec<u8>) -> Result<RecoverableSignature, ()> { unreachable!(); }
 }
@@ -114,7 +115,7 @@ impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
                let mut w = TestVecWriter(Vec::new());
                monitor.write(&mut w).unwrap();
                let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
-                       &mut ::std::io::Cursor::new(&w.0), self.keys_manager).unwrap().1;
+                       &mut io::Cursor::new(&w.0), self.keys_manager).unwrap().1;
                assert!(new_monitor == monitor);
                self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(), (funding_txo, monitor.get_latest_update_id()));
                self.added_monitors.lock().unwrap().push((funding_txo, monitor));
@@ -136,7 +137,7 @@ impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
                let mut w = TestVecWriter(Vec::new());
                update.write(&mut w).unwrap();
                assert!(channelmonitor::ChannelMonitorUpdate::read(
-                               &mut ::std::io::Cursor::new(&w.0)).unwrap() == update);
+                               &mut io::Cursor::new(&w.0)).unwrap() == update);
 
                if let Some(exp) = self.expect_channel_force_closed.lock().unwrap().take() {
                        assert_eq!(funding_txo.to_channel_id(), exp.0);
@@ -155,7 +156,7 @@ impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
                w.0.clear();
                monitor.write(&mut w).unwrap();
                let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
-                       &mut ::std::io::Cursor::new(&w.0), self.keys_manager).unwrap().1;
+                       &mut io::Cursor::new(&w.0), self.keys_manager).unwrap().1;
                assert!(new_monitor == *monitor);
                self.added_monitors.lock().unwrap().push((funding_txo, new_monitor));
 
@@ -481,7 +482,7 @@ impl keysinterface::KeysInterface for TestKeysInterface {
        }
 
        fn read_chan_signer(&self, buffer: &[u8]) -> Result<Self::Signer, msgs::DecodeError> {
-               let mut reader = std::io::Cursor::new(buffer);
+               let mut reader = io::Cursor::new(buffer);
 
                let inner: InMemorySigner = Readable::read(&mut reader)?;
                let revoked_commitment = self.make_revoked_commitment_cell(inner.commitment_seed);
index 5c02fe156aaeffad0355c9f95aaafc3fbba26c88..1ac9f5eb5c128d31c1b856bfff21b65932e08cbf 100644 (file)
@@ -15,6 +15,7 @@ use bitcoin::consensus::encode::VarInt;
 use ln::msgs::MAX_VALUE_MSAT;
 
 use prelude::*;
+use io_extras::sink;
 use core::cmp::Ordering;
 
 pub fn sort_outputs<T, C : Fn(&T, &T) -> Ordering>(outputs: &mut Vec<(TxOut, T)>, tie_breaker: C) {
@@ -56,7 +57,7 @@ pub(crate) fn maybe_add_change_output(tx: &mut Transaction, input_value: u64, wi
                script_pubkey: change_destination_script,
                value: 0,
        };
-       let change_len = change_output.consensus_encode(&mut std::io::sink()).unwrap();
+       let change_len = change_output.consensus_encode(&mut sink()).unwrap();
        let mut weight_with_change: i64 = tx.get_weight() as i64 + 2 + witness_max_weight as i64 + change_len as i64 * 4;
        // Include any extra bytes required to push an extra output.
        weight_with_change += (VarInt(tx.output.len() as u64 + 1).len() - VarInt(tx.output.len() as u64).len()) as i64 * 4;
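The change-output sizing above measures the extra serialized bytes by encoding into a `sink()` and multiplying by four, since non-witness bytes each cost four weight units. A standalone sketch of the same measurement (the function name and values are illustrative):

```rust
use bitcoin::blockdata::script::Script;
use bitcoin::blockdata::transaction::TxOut;
use bitcoin::consensus::Encodable;
use std::io::sink;

fn output_weight_contribution(script_pubkey: Script) -> usize {
    let output = TxOut { script_pubkey, value: 0 };
    // consensus_encode returns the number of bytes written; writing into a
    // sink yields the serialized length without allocating a buffer.
    let serialized_len = output.consensus_encode(&mut sink()).unwrap();
    // Outputs are non-witness data, so each byte costs four weight units.
    serialized_len * 4
}
```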