jobs:
build:
strategy:
+ fail-fast: false
matrix:
platform: [ ubuntu-latest ]
toolchain: [ stable,
1.41.0,
# 1.45.2 is MSRV for lightning-net-tokio, lightning-block-sync, and coverage generation
1.45.2,
- # 1.49.0 is MSRV for no_std builds using hashbrown
- 1.49.0]
+ # 1.47.0 will be the MSRV for no-std builds using hashbrown once core2 is updated
+ 1.47.0]
include:
- toolchain: stable
build-net-tokio: true
platform: macos-latest
build-net-tokio: true
build-no-std: true
+ - toolchain: beta
+ platform: macos-latest
+ build-net-tokio: true
+ build-no-std: true
- toolchain: stable
platform: windows-latest
build-net-tokio: true
build-no-std: true
+ - toolchain: beta
+ platform: windows-latest
+ build-net-tokio: true
+ build-no-std: true
- toolchain: beta
build-net-tokio: true
build-no-std: true
build-net-tokio: true
build-no-std: false
coverage: true
- - toolchain: 1.49.0
- build-no-std: true
+ - toolchain: 1.47.0
+ build-no-std: false
runs-on: ${{ matrix.platform }}
steps:
- name: Checkout source code
- name: Test on Rust ${{ matrix.toolchain }} with net-tokio and full code-linking for coverage generation
if: matrix.coverage
run: RUSTFLAGS="-C link-dead-code" cargo test --verbose --color always
- - name: Test on no_std bullds Rust ${{ matrix.toolchain }}
+ - name: Test on no-std builds Rust ${{ matrix.toolchain }}
if: "matrix.build-no-std && !matrix.coverage"
run: |
cd lightning
- cargo test --verbose --color always --no-default-features --features no_std
- # check if there is a conflict between no_std and the default std feature
- cargo test --verbose --color always --features no_std
+ cargo test --verbose --color always --no-default-features --features no-std
+ # check if there is a conflict between no-std and the default std feature
+ cargo test --verbose --color always --features no-std
cd ..
- - name: Test on no_std bullds Rust ${{ matrix.toolchain }} and full code-linking for coverage generation
+ - name: Test on no-std builds Rust ${{ matrix.toolchain }} and full code-linking for coverage generation
if: "matrix.build-no-std && matrix.coverage"
run: |
cd lightning
- RUSTFLAGS="-C link-dead-code" cargo test --verbose --color always --no-default-features --features no_std
+ RUSTFLAGS="-C link-dead-code" cargo test --verbose --color always --no-default-features --features no-std
cd ..
- name: Test on Rust ${{ matrix.toolchain }}
if: "! matrix.build-net-tokio"
rustup component add clippy
- name: Run default clippy linting
run: |
- cargo clippy -- -Aclippy::erasing_op -Aclippy::never_loop -Aclippy::if_same_then_else
+ cargo clippy -- -Aclippy::erasing_op -Aclippy::never_loop -Aclippy::if_same_then_else -Dclippy::try_err
cargo doc
cargo doc --document-private-items
cd fuzz && cargo check --features=stdin_fuzz
-cd ../lightning && cargo check --no-default-features --features=no_std
+cd ../lightning && cargo check --no-default-features --features=no-std
[dependencies]
afl = { version = "0.4", optional = true }
lightning = { path = "../lightning", features = ["fuzztarget"] }
-bitcoin = { version = "0.26", features = ["fuzztarget", "secp-lowmemory"] }
+bitcoin = { version = "0.27", features = ["fuzztarget", "secp-lowmemory"] }
hex = "0.3"
honggfuzz = { version = "0.5", optional = true }
libfuzzer-sys = { git = "https://github.com/rust-fuzz/libfuzzer-sys.git", optional = true }
edition = "2018"
[dependencies]
-bitcoin = "0.26"
+bitcoin = "0.27"
lightning = { version = "0.0.99", path = "../lightning", features = ["allow_wallclock_use"] }
lightning-persister = { version = "0.0.99", path = "../lightning-persister" }
/// for unilateral chain closure fees are at risk.
pub struct BackgroundProcessor {
stop_thread: Arc<AtomicBool>,
- /// May be used to retrieve and handle the error if `BackgroundProcessor`'s thread
- /// exits due to an error while persisting.
- pub thread_handle: JoinHandle<Result<(), std::io::Error>>,
+ thread_handle: Option<JoinHandle<Result<(), std::io::Error>>>,
}
#[cfg(not(test))]
}
impl BackgroundProcessor {
- /// Start a background thread that takes care of responsibilities enumerated in the top-level
- /// documentation.
+ /// Start a background thread that takes care of responsibilities enumerated in the [top-level
+ /// documentation].
///
- /// If `persist_manager` returns an error, then this thread will return said error (and
- /// `start()` will need to be called again to restart the `BackgroundProcessor`). Users should
- /// wait on [`thread_handle`]'s `join()` method to be able to tell if and when an error is
- /// returned, or implement `persist_manager` such that an error is never returned to the
- /// `BackgroundProcessor`
+ /// The thread runs indefinitely unless the object is dropped, [`stop`] is called, or
+ /// `persist_manager` returns an error. In case of an error, the error is retrieved by calling
+ /// either [`join`] or [`stop`].
+ ///
+ /// Typically, users should either implement [`ChannelManagerPersister`] to never return an
+ /// error or call [`join`] and handle any error that may arise. For the latter case, the
+ /// `BackgroundProcessor` must be restarted by calling `start` again after handling the error.
///
/// `persist_manager` is responsible for writing out the [`ChannelManager`] to disk, and/or
/// uploading to one or more backup services. See [`ChannelManager::write`] for writing out a
/// [`ChannelManager`]. See [`FilesystemPersister::persist_manager`] for Rust-Lightning's
/// provided implementation.
///
- /// [`thread_handle`]: BackgroundProcessor::thread_handle
+ /// [top-level documentation]: Self
+ /// [`join`]: Self::join
+ /// [`stop`]: Self::stop
/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
/// [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable
/// [`FilesystemPersister::persist_manager`]: lightning_persister::FilesystemPersister::persist_manager
}
}
});
- Self { stop_thread: stop_thread_clone, thread_handle: handle }
+ Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
+ }
+
+ /// Join `BackgroundProcessor`'s thread, returning any error that occurred while persisting
+ /// [`ChannelManager`].
+ ///
+ /// # Panics
+ ///
+ /// This function panics if the background thread has panicked such as while persisting or
+ /// handling events.
+ ///
+ /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
+ pub fn join(mut self) -> Result<(), std::io::Error> {
+ assert!(self.thread_handle.is_some());
+ self.join_thread()
+ }
+
+ /// Stop `BackgroundProcessor`'s thread, returning any error that occurred while persisting
+ /// [`ChannelManager`].
+ ///
+ /// # Panics
+ ///
+ /// This function panics if the background thread has panicked such as while persisting or
+ /// handling events.
+ ///
+ /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
+ pub fn stop(mut self) -> Result<(), std::io::Error> {
+ assert!(self.thread_handle.is_some());
+ self.stop_and_join_thread()
}
- /// Stop `BackgroundProcessor`'s thread.
- pub fn stop(self) -> Result<(), std::io::Error> {
+ fn stop_and_join_thread(&mut self) -> Result<(), std::io::Error> {
self.stop_thread.store(true, Ordering::Release);
- self.thread_handle.join().unwrap()
+ self.join_thread()
+ }
+
+ fn join_thread(&mut self) -> Result<(), std::io::Error> {
+ match self.thread_handle.take() {
+ Some(handle) => handle.join().unwrap(),
+ None => Ok(()),
+ }
+ }
+}
+
+impl Drop for BackgroundProcessor {
+ fn drop(&mut self) {
+ self.stop_and_join_thread().unwrap();
}
}
let persister = |_: &_| Err(std::io::Error::new(std::io::ErrorKind::Other, "test"));
let event_handler = |_| {};
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
- let _ = bg_processor.thread_handle.join().unwrap().expect_err("Errored persisting manager: test");
+ match bg_processor.join() {
+ Ok(_) => panic!("Expected error persisting manager"),
+ Err(e) => {
+ assert_eq!(e.kind(), std::io::ErrorKind::Other);
+ assert_eq!(e.get_ref().unwrap().to_string(), "test");
+ },
+ }
}
#[test]
rpc-client = [ "serde", "serde_json", "chunked_transfer" ]
[dependencies]
-bitcoin = "0.26"
+bitcoin = "0.27"
lightning = { version = "0.0.99", path = "../lightning" }
tokio = { version = "1.0", features = [ "io-util", "net", "time" ], optional = true }
serde = { version = "1.0", features = ["derive"], optional = true }
#[test]
fn connect_to_unresolvable_host() {
match HttpClient::connect(("example.invalid", 80)) {
- Err(e) => assert_eq!(e.kind(), std::io::ErrorKind::Other),
+ Err(e) => {
+ assert!(e.to_string().contains("failed to lookup address information") ||
+ e.to_string().contains("No such host"), "{:?}", e);
+ },
Ok(_) => panic!("Expected error"),
}
}
readme = "README.md"
[dependencies]
-bech32 = "0.7"
+bech32 = "0.8"
lightning = { version = "0.0.99", path = "../lightning" }
secp256k1 = { version = "0.20", features = ["recovery"] }
num-traits = "0.2.8"
-bitcoin_hashes = "0.9.4"
+bitcoin_hashes = "0.10"
[dev-dependencies]
lightning = { version = "0.0.99", path = "../lightning", features = ["_test_utils"] }
honggfuzz = { version = "0.5", optional = true }
afl = { version = "0.4", optional = true }
lightning-invoice = { path = ".."}
-bech32 = "0.7"
+bech32 = "0.8"
# Prevent this from interfering with workspaces
[workspace]
type Err = ParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
- let (hrp, data) = bech32::decode(s)?;
+ let (hrp, data, var) = bech32::decode(s)?;
+
+ if var == bech32::Variant::Bech32m {
+ // Consider Bech32m addresses to be "Invalid Checksum", since that is what we'd get if
+ // we didn't support Bech32m (which lightning does not use).
+ return Err(ParseError::Bech32Error(bech32::Error::InvalidChecksum));
+ }
if data.len() < 104 {
return Err(ParseError::TooShortDataPart);
let mut data = self.raw_invoice.data.to_base32();
data.extend_from_slice(&self.signature.to_base32());
- bech32::encode_to_fmt(f, &hrp, data).expect("HRP is valid")?;
+ bech32::encode_to_fmt(f, &hrp, data, bech32::Variant::Bech32).expect("HRP is valid")?;
Ok(())
}
edition = "2018"
[dependencies]
-bitcoin = "0.26"
+bitcoin = "0.27"
lightning = { version = "0.0.99", path = "../lightning" }
tokio = { version = "1.0", features = [ "io-util", "macros", "rt", "sync", "net", "time" ] }
unstable = ["lightning/unstable"]
[dependencies]
-bitcoin = "0.26"
+bitcoin = "0.27"
lightning = { version = "0.0.99", path = "../lightning" }
libc = "0.2"
// Create the channel data file and make it a directory.
fs::create_dir_all(get_full_filepath(path.clone(), filename.to_string())).unwrap();
match write_to_file(path.clone(), filename.to_string(), &test_writeable) {
- Err(e) => assert_eq!(e.kind(), io::ErrorKind::Other),
+ Err(e) => assert_eq!(e.raw_os_error(), Some(libc::EISDIR)),
_ => panic!("Unexpected Ok(())")
}
fs::remove_dir_all(path).unwrap();
match write_to_file(path, filename, &test_writeable) {
Err(e) => {
#[cfg(not(target_os = "windows"))]
- assert_eq!(e.kind(), io::ErrorKind::Other);
+ assert_eq!(e.raw_os_error(), Some(libc::EISDIR));
#[cfg(target_os = "windows")]
assert_eq!(e.kind(), io::ErrorKind::PermissionDenied);
}
unsafe_revoked_tx_signing = []
unstable = []
-no_std = ["hashbrown"]
-std = []
+no-std = ["hashbrown", "bitcoin/no-std", "core2/alloc"]
+std = ["bitcoin/std"]
default = ["std"]
[dependencies]
-bitcoin = "0.26"
+bitcoin = { version = "0.27", default-features = false, features = ["secp-recovery"] }
+# TODO remove this once rust-bitcoin PR #637 is released
+secp256k1 = { version = "0.20.2", default-features = false, features = ["alloc"] }
hashbrown = { version = "0.11", optional = true }
hex = { version = "0.3", optional = true }
regex = { version = "0.1.80", optional = true }
+core2 = { version = "0.3.0", optional = true, default-features = false }
+
[dev-dependencies]
hex = "0.3"
regex = "0.1.80"
+# TODO remove this once rust-bitcoin PR #637 is released
+secp256k1 = { version = "0.20.2", default-features = false, features = ["alloc"] }
[dev-dependencies.bitcoin]
-version = "0.26"
-features = ["bitcoinconsensus"]
+version = "0.27"
+default-features = false
+features = ["bitcoinconsensus", "secp-recovery"]
[package.metadata.docs.rs]
features = ["allow_wallclock_use"] # When https://github.com/rust-lang/rust/issues/43781 complies with our MSRV, we can add nice banners in the docs for the methods behind this feature-gate.
use prelude::*;
use core::{cmp, mem};
-use std::io::Error;
+use io::{self, Error};
use core::ops::Deref;
use sync::Mutex;
pub const CLOSED_CHANNEL_UPDATE_ID: u64 = core::u64::MAX;
impl Writeable for ChannelMonitorUpdate {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
write_ver_prefix!(w, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
self.update_id.write(w)?;
(self.updates.len() as u64).write(w)?;
}
}
impl Readable for ChannelMonitorUpdate {
- fn read<R: ::std::io::Read>(r: &mut R) -> Result<Self, DecodeError> {
+ fn read<R: io::Read>(r: &mut R) -> Result<Self, DecodeError> {
let _ver = read_ver_prefix!(r, SERIALIZATION_VERSION);
let update_id: u64 = Readable::read(r)?;
let len: u64 = Readable::read(r)?;
}
impl Writeable for CounterpartyCommitmentTransaction {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
w.write_all(&byte_utils::be64_to_array(self.per_htlc.len() as u64))?;
for (ref txid, ref htlcs) in self.per_htlc.iter() {
w.write_all(&txid[..])?;
}
}
impl Readable for CounterpartyCommitmentTransaction {
- fn read<R: ::std::io::Read>(r: &mut R) -> Result<Self, DecodeError> {
+ fn read<R: io::Read>(r: &mut R) -> Result<Self, DecodeError> {
let counterparty_commitment_transaction = {
let per_htlc_len: u64 = Readable::read(r)?;
let mut per_htlc = HashMap::with_capacity(cmp::min(per_htlc_len as usize, MAX_ALLOC_SIZE / 64));
output: outp.clone(),
});
break;
- } else if let Some(ref broadcasted_holder_revokable_script) = self.broadcasted_holder_revokable_script {
+ }
+ if let Some(ref broadcasted_holder_revokable_script) = self.broadcasted_holder_revokable_script {
if broadcasted_holder_revokable_script.0 == outp.script_pubkey {
spendable_output = Some(SpendableOutputDescriptor::DelayedPaymentOutput(DelayedPaymentOutputDescriptor {
outpoint: OutPoint { txid: tx.txid(), index: i as u16 },
}));
break;
}
- } else if self.counterparty_payment_script == outp.script_pubkey {
+ }
+ if self.counterparty_payment_script == outp.script_pubkey {
spendable_output = Some(SpendableOutputDescriptor::StaticPaymentOutput(StaticPaymentOutputDescriptor {
outpoint: OutPoint { txid: tx.txid(), index: i as u16 },
output: outp.clone(),
channel_value_satoshis: self.channel_value_satoshis,
}));
break;
- } else if outp.script_pubkey == self.shutdown_script {
+ }
+ if outp.script_pubkey == self.shutdown_script {
spendable_output = Some(SpendableOutputDescriptor::StaticOutput {
outpoint: OutPoint { txid: tx.txid(), index: i as u16 },
output: outp.clone(),
});
+ break;
}
}
if let Some(spendable_output) = spendable_output {
impl<'a, Signer: Sign, K: KeysInterface<Signer = Signer>> ReadableArgs<&'a K>
for (BlockHash, ChannelMonitor<Signer>) {
- fn read<R: ::std::io::Read>(reader: &mut R, keys_manager: &'a K) -> Result<Self, DecodeError> {
+ fn read<R: io::Read>(reader: &mut R, keys_manager: &'a K) -> Result<Self, DecodeError> {
macro_rules! unwrap_obj {
($key: expr) => {
match $key {
use prelude::*;
use core::sync::atomic::{AtomicUsize, Ordering};
-use std::io::Error;
+use io::{self, Error};
use ln::msgs::{DecodeError, MAX_VALUE_MSAT};
/// Information about a spendable output to a P2WSH script. See
}
impl Readable for InMemorySigner {
- fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+ fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
let funding_key = Readable::read(reader)?;
}
fn read_chan_signer(&self, reader: &[u8]) -> Result<Self::Signer, DecodeError> {
- InMemorySigner::read(&mut std::io::Cursor::new(reader))
+ InMemorySigner::read(&mut io::Cursor::new(reader))
}
fn sign_invoice(&self, invoice_preimage: Vec<u8>) -> Result<RecoverableSignature, ()> {
use util::ser::{Readable, ReadableArgs, Writer, Writeable, VecWriter};
use util::byte_utils;
+use io;
use prelude::*;
use alloc::collections::BTreeMap;
use core::cmp;
;);
impl Readable for Option<Vec<Option<(usize, Signature)>>> {
- fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+ fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
match Readable::read(reader)? {
0u8 => Ok(None),
1u8 => {
}
impl Writeable for Option<Vec<Option<(usize, Signature)>>> {
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
match self {
&Some(ref vec) => {
1u8.write(writer)?;
const MIN_SERIALIZATION_VERSION: u8 = 1;
impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
- pub(crate) fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ pub(crate) fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
self.destination_script.write(writer)?;
}
impl<'a, K: KeysInterface> ReadableArgs<&'a K> for OnchainTxHandler<K::Signer> {
- fn read<R: ::std::io::Read>(reader: &mut R, keys_manager: &'a K) -> Result<Self, DecodeError> {
+ fn read<R: io::Read>(reader: &mut R, keys_manager: &'a K) -> Result<Self, DecodeError> {
let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
let destination_script = Readable::read(reader)?;
for _ in 0..locktimed_packages_len {
let locktime = Readable::read(reader)?;
let packages_len: u64 = Readable::read(reader)?;
- let mut packages = Vec::with_capacity(cmp::min(packages_len as usize, MAX_ALLOC_SIZE / std::mem::size_of::<PackageTemplate>()));
+ let mut packages = Vec::with_capacity(cmp::min(packages_len as usize, MAX_ALLOC_SIZE / core::mem::size_of::<PackageTemplate>()));
for _ in 0..packages_len {
packages.push(Readable::read(reader)?);
}
use util::logger::Logger;
use util::ser::{Readable, Writer, Writeable};
+use io;
+use prelude::*;
use core::cmp;
use core::mem;
use core::ops::Deref;
PackageSolvingData::RevokedOutput(_) => output_conf_height + 1,
PackageSolvingData::RevokedHTLCOutput(_) => output_conf_height + 1,
PackageSolvingData::CounterpartyOfferedHTLCOutput(_) => output_conf_height + 1,
- PackageSolvingData::CounterpartyReceivedHTLCOutput(ref outp) => std::cmp::max(outp.htlc.cltv_expiry, output_conf_height + 1),
- PackageSolvingData::HolderHTLCOutput(ref outp) => std::cmp::max(outp.cltv_expiry, output_conf_height + 1),
+ PackageSolvingData::CounterpartyReceivedHTLCOutput(ref outp) => cmp::max(outp.htlc.cltv_expiry, output_conf_height + 1),
+ PackageSolvingData::HolderHTLCOutput(ref outp) => cmp::max(outp.cltv_expiry, output_conf_height + 1),
PackageSolvingData::HolderFundingOutput(_) => output_conf_height + 1,
};
absolute_timelock
}
impl Writeable for PackageTemplate {
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
writer.write_all(&byte_utils::be64_to_array(self.inputs.len() as u64))?;
for (ref outpoint, ref rev_outp) in self.inputs.iter() {
outpoint.write(writer)?;
}
impl Readable for PackageTemplate {
- fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+ fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
let inputs_count = <u64 as Readable>::read(reader)?;
let mut inputs: Vec<(BitcoinOutPoint, PackageSolvingData)> = Vec::with_capacity(cmp::min(inputs_count as usize, MAX_ALLOC_SIZE / 128));
for _ in 0..inputs_count {
#![allow(bare_trait_objects)]
#![allow(ellipsis_inclusive_range_patterns)]
+#![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
+
#![cfg_attr(all(any(test, feature = "_test_utils"), feature = "unstable"), feature(test))]
#[cfg(all(any(test, feature = "_test_utils"), feature = "unstable"))] extern crate test;
+#[cfg(not(any(feature = "std", feature = "no-std")))]
+compile_error!("at least one of the `std` or `no-std` features must be enabled");
+
+#[macro_use]
extern crate alloc;
extern crate bitcoin;
+#[cfg(any(test, feature = "std"))]
extern crate core;
+
#[cfg(any(test, feature = "_test_utils"))] extern crate hex;
#[cfg(any(test, feature = "fuzztarget", feature = "_test_utils"))] extern crate regex;
+#[cfg(not(feature = "std"))] extern crate core2;
+
#[macro_use]
pub mod util;
pub mod chain;
pub mod ln;
pub mod routing;
+#[cfg(feature = "std")]
+use std::io;
+#[cfg(not(feature = "std"))]
+use core2::io;
+
+#[cfg(not(feature = "std"))]
+mod io_extras {
+ use core2::io::{self, Read, Write};
+
+ /// A writer which will move data into the void.
+ pub struct Sink {
+ _priv: (),
+ }
+
+ /// Creates an instance of a writer which will successfully consume all data.
+ pub const fn sink() -> Sink {
+ Sink { _priv: () }
+ }
+
+ impl core2::io::Write for Sink {
+ #[inline]
+ fn write(&mut self, buf: &[u8]) -> core2::io::Result<usize> {
+ Ok(buf.len())
+ }
+
+ #[inline]
+ fn flush(&mut self) -> core2::io::Result<()> {
+ Ok(())
+ }
+ }
+
+ pub fn copy<R: ?Sized, W: ?Sized>(reader: &mut R, writer: &mut W) -> Result<u64, io::Error>
+ where
+ R: Read,
+ W: Write,
+ {
+ let mut count = 0;
+ let mut buf = [0u8; 64];
+
+ loop {
+ match reader.read(&mut buf) {
+ Ok(0) => break,
+ Ok(n) => { writer.write_all(&buf[0..n])?; count += n as u64; },
+ Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {},
+ Err(e) => return Err(e.into()),
+ };
+ }
+ Ok(count)
+ }
+
+ pub fn read_to_end<D: io::Read>(mut d: D) -> Result<alloc::vec::Vec<u8>, io::Error> {
+ let mut result = vec![];
+ let mut buf = [0u8; 64];
+ loop {
+ match d.read(&mut buf) {
+ Ok(0) => break,
+ Ok(n) => result.extend_from_slice(&buf[0..n]),
+ Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {},
+ Err(e) => return Err(e.into()),
+ };
+ }
+ Ok(result)
+ }
+}
+
+#[cfg(feature = "std")]
+mod io_extras {
+ pub fn read_to_end<D: ::std::io::Read>(mut d: D) -> Result<Vec<u8>, ::std::io::Error> {
+ let mut buf = Vec::new();
+ d.read_to_end(&mut buf)?;
+ Ok(buf)
+ }
+
+ pub use std::io::{copy, sink};
+}
+
mod prelude {
#[cfg(feature = "hashbrown")]
extern crate hashbrown;
- pub use alloc::{vec, vec::Vec, string::String, collections::VecDeque};
+ pub use alloc::{vec, vec::Vec, string::String, collections::VecDeque, boxed::Box};
#[cfg(not(feature = "hashbrown"))]
pub use std::collections::{HashMap, HashSet, hash_map};
#[cfg(feature = "hashbrown")]
pub use self::hashbrown::{HashMap, HashSet, hash_map};
+
+ pub use alloc::borrow::ToOwned;
+ pub use alloc::string::ToString;
}
#[cfg(feature = "std")]
use bitcoin::secp256k1::Error as SecpError;
use bitcoin::secp256k1;
+use io;
use prelude::*;
use core::cmp;
use ln::chan_utils;
}
impl Writeable for CounterpartyCommitmentSecrets {
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
for &(ref secret, ref idx) in self.old_secrets.iter() {
writer.write_all(secret)?;
writer.write_all(&byte_utils::be64_to_array(*idx))?;
}
}
impl Readable for CounterpartyCommitmentSecrets {
- fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+ fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
let mut old_secrets = [([0; 32], 1 << 48); 49];
for &mut (ref mut secret, ref mut idx) in old_secrets.iter_mut() {
*secret = Readable::read(reader)?;
use util::test_utils;
+use io;
use prelude::*;
use sync::{Arc, Mutex};
let mut w = test_utils::TestVecWriter(Vec::new());
monitor.write(&mut w).unwrap();
let new_monitor = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
- &mut ::std::io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
+ &mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
assert!(new_monitor == *monitor);
let chain_mon = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
assert!(chain_mon.watch_channel(outpoint, new_monitor).is_ok());
use util::config::{UserConfig,ChannelConfig};
use util::scid_utils::scid_from_parts;
+use io;
use prelude::*;
use core::{cmp,mem,fmt};
use core::ops::Deref;
);
impl Writeable for ChannelUpdateStatus {
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
// We only care about writing out the current state as it was announced, ie only either
// Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
// channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
}
impl Readable for ChannelUpdateStatus {
- fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+ fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
Ok(match <u8 as Readable>::read(reader)? {
0 => ChannelUpdateStatus::Enabled,
1 => ChannelUpdateStatus::Disabled,
}
impl<Signer: Sign> Writeable for Channel<Signer> {
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
// Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
// called.
const MAX_ALLOC_SIZE: usize = 64*1024;
impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel<Signer>
where K::Target: KeysInterface<Signer = Signer> {
- fn read<R : ::std::io::Read>(reader: &mut R, keys_source: &'a K) -> Result<Self, DecodeError> {
+ fn read<R : io::Read>(reader: &mut R, keys_source: &'a K) -> Result<Self, DecodeError> {
let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
let user_id = Readable::read(reader)?;
use util::logger::{Logger, Level};
use util::errors::APIError;
+use io;
use prelude::*;
use core::{cmp, mem};
use core::cell::RefCell;
-use std::io::{Cursor, Read};
+use io::{Cursor, Read};
use sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard};
use core::sync::atomic::{AtomicUsize, Ordering};
use core::time::Duration;
result = NotifyOption::DoPersist;
}
- let mut pending_events = std::mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]);
+ let mut pending_events = mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]);
if !pending_events.is_empty() {
result = NotifyOption::DoPersist;
}
});
impl Writeable for ClaimableHTLC {
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
let payment_data = match &self.onion_payload {
OnionPayload::Invoice(data) => Some(data.clone()),
_ => None,
F::Target: FeeEstimator,
L::Target: Logger,
{
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
let _consistency_lock = self.total_consistency_lock.write().unwrap();
write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
F::Target: FeeEstimator,
L::Target: Logger,
{
- fn read<R: ::std::io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
+ fn read<R: io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
let (blockhash, chan_manager) = <(BlockHash, ChannelManager<Signer, M, T, K, F, L>)>::read(reader, args)?;
Ok((blockhash, Arc::new(chan_manager)))
}
F::Target: FeeEstimator,
L::Target: Logger,
{
- fn read<R: ::std::io::Read>(reader: &mut R, mut args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
+ fn read<R: io::Read>(reader: &mut R, mut args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
let genesis_hash: BlockHash = Readable::read(reader)?;
mod tests {
use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
- use core::sync::atomic::{AtomicBool, Ordering};
use core::time::Duration;
use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
- use ln::channelmanager::PersistenceNotifier;
use ln::features::{InitFeatures, InvoiceFeatures};
use ln::functional_test_utils::*;
use ln::msgs;
use routing::router::{get_keysend_route, get_route};
use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
use util::test_utils;
- use std::sync::Arc;
- use std::thread;
#[cfg(feature = "std")]
#[test]
fn test_wait_timeout() {
+ use ln::channelmanager::PersistenceNotifier;
+ use sync::Arc;
+ use core::sync::atomic::{AtomicBool, Ordering};
+ use std::thread;
+
let persistence_notifier = Arc::new(PersistenceNotifier::new());
let thread_notifier = Arc::clone(&persistence_notifier);
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+ // All nodes start with a persistable update pending as `create_network` connects each node
+ // with all other nodes to make most tests simpler.
+ assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+ assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+ assert!(nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
+
let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
// We check that the channel info nodes have doesn't change too early, even though we try
//! [BOLT #9]: https://github.com/lightningnetwork/lightning-rfc/blob/master/09-features.md
//! [messages]: crate::ln::msgs
+use io;
use prelude::*;
use core::{cmp, fmt};
use core::marker::PhantomData;
)*
];
}
+
+ impl alloc::fmt::Display for Features<$context> {
+ fn fmt(&self, fmt: &mut alloc::fmt::Formatter) -> Result<(), alloc::fmt::Error> {
+ $(
+ $(
+ fmt.write_fmt(format_args!("{}: {}, ", stringify!($required_feature),
+ if <$context as $required_feature>::requires_feature(&self.flags) { "required" }
+ else if <$context as $required_feature>::supports_feature(&self.flags) { "supported" }
+ else { "not supported" }))?;
+ )*
+ $(
+ fmt.write_fmt(format_args!("{}: {}, ", stringify!($optional_feature),
+ if <$context as $optional_feature>::requires_feature(&self.flags) { "required" }
+ else if <$context as $optional_feature>::supports_feature(&self.flags) { "supported" }
+ else { "not supported" }))?;
+ )*
+ )*
+ fmt.write_fmt(format_args!("unknown flags: {}",
+ if self.requires_unknown_bits() { "required" }
+ else if self.supports_unknown_bits() { "supported" } else { "none" }))
+ }
+ }
};
}
impl InitFeatures {
/// Writes all features present up to, and including, 13.
- pub(crate) fn write_up_to_13<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ pub(crate) fn write_up_to_13<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
let len = cmp::min(2, self.flags.len());
w.size_hint(len + 2);
(len as u16).write(w)?;
pub(crate) fn requires_data_loss_protect(&self) -> bool {
<T as sealed::DataLossProtect>::requires_feature(&self.flags)
}
+ #[cfg(test)]
pub(crate) fn supports_data_loss_protect(&self) -> bool {
<T as sealed::DataLossProtect>::supports_feature(&self.flags)
}
}
impl<T: sealed::Context> Writeable for Features<T> {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
w.size_hint(self.flags.len() + 2);
(self.flags.len() as u16).write(w)?;
for f in self.flags.iter().rev() { // Swap back to big-endian
}
impl<T: sealed::Context> Readable for Features<T> {
- fn read<R: ::std::io::Read>(r: &mut R) -> Result<Self, DecodeError> {
+ fn read<R: io::Read>(r: &mut R) -> Result<Self, DecodeError> {
let mut flags: Vec<u8> = Readable::read(r)?;
flags.reverse(); // Swap to little-endian
Ok(Self {
use bitcoin::secp256k1::key::PublicKey;
+use io;
use prelude::*;
use core::cell::RefCell;
use std::rc::Rc;
let mut w = test_utils::TestVecWriter(Vec::new());
let network_graph_ser = self.net_graph_msg_handler.network_graph.read().unwrap();
network_graph_ser.write(&mut w).unwrap();
- let network_graph_deser = <NetworkGraph>::read(&mut ::std::io::Cursor::new(&w.0)).unwrap();
+ let network_graph_deser = <NetworkGraph>::read(&mut io::Cursor::new(&w.0)).unwrap();
assert!(network_graph_deser == *self.net_graph_msg_handler.network_graph.read().unwrap());
let net_graph_msg_handler = NetGraphMsgHandler::from_net_graph(
Some(self.chain_source), self.logger, network_graph_deser
let mut w = test_utils::TestVecWriter(Vec::new());
old_monitor.write(&mut w).unwrap();
let (_, deserialized_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
- &mut ::std::io::Cursor::new(&w.0), self.keys_manager).unwrap();
+ &mut io::Cursor::new(&w.0), self.keys_manager).unwrap();
deserialized_monitors.push(deserialized_monitor);
}
}
let mut w = test_utils::TestVecWriter(Vec::new());
self.node.write(&mut w).unwrap();
- <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut ::std::io::Cursor::new(w.0), ChannelManagerReadArgs {
+ <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut io::Cursor::new(w.0), ChannelManagerReadArgs {
default_config: *self.node.get_current_default_configuration(),
keys_manager: self.keys_manager,
fee_estimator: &test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) },
})
}
+ for i in 0..node_count {
+ for j in (i+1)..node_count {
+ nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
+ nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() });
+ }
+ }
+
nodes
}
use regex;
+use io;
use prelude::*;
use alloc::collections::BTreeSet;
use core::default::Default;
let mut channel_monitors = HashMap::new();
channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
<(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>
- ::read(&mut std::io::Cursor::new(&chan_manager_serialized.0[..]), ChannelManagerReadArgs {
+ ::read(&mut io::Cursor::new(&chan_manager_serialized.0[..]), ChannelManagerReadArgs {
default_config: Default::default(),
keys_manager,
fee_estimator: node_cfgs[0].fee_estimator,
// Restore node A from previous state
logger = test_utils::TestLogger::with_id(format!("node {}", 0));
- let mut chain_monitor = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut ::std::io::Cursor::new(previous_chain_monitor_state.0), keys_manager).unwrap().1;
+ let mut chain_monitor = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut io::Cursor::new(previous_chain_monitor_state.0), keys_manager).unwrap().1;
chain_source = test_utils::TestChainSource::new(Network::Testnet);
tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))};
fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
node_state_0 = {
let mut channel_monitors = HashMap::new();
channel_monitors.insert(OutPoint { txid: chan.3.txid(), index: 0 }, &mut chain_monitor);
- <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut ::std::io::Cursor::new(previous_node_state), ChannelManagerReadArgs {
+ <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut io::Cursor::new(previous_node_state), ChannelManagerReadArgs {
keys_manager: keys_manager,
fee_estimator: &fee_estimator,
chain_monitor: &monitor,
let mut w = test_utils::TestVecWriter(Vec::new());
monitor.write(&mut w).unwrap();
let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
- &mut ::std::io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
+ &mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
assert!(new_monitor == *monitor);
let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
let mut w = test_utils::TestVecWriter(Vec::new());
monitor.write(&mut w).unwrap();
let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
- &mut ::std::io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
+ &mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
assert!(new_monitor == *monitor);
let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
let mut w = test_utils::TestVecWriter(Vec::new());
monitor.write(&mut w).unwrap();
let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
- &mut ::std::io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
+ &mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
assert!(new_monitor == *monitor);
let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
use prelude::*;
use core::{cmp, fmt};
use core::fmt::Debug;
-use std::io::Read;
+use io::{self, Read};
+use io_extras::read_to_end;
use util::events::MessageSendEventsProvider;
use util::logger;
BadLengthDescriptor,
/// Error from std::io
Io(/// (C-not exported) as ErrorKind doesn't have a reasonable mapping
- ::std::io::ErrorKind),
+ io::ErrorKind),
/// The message included zlib-compressed values, which we don't support.
UnsupportedCompression,
}
}
impl Writeable for NetAddress {
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
match self {
&NetAddress::IPv4 { ref addr, ref port } => {
1u8.write(writer)?;
}
}
-impl From<::std::io::Error> for DecodeError {
- fn from(e: ::std::io::Error) -> Self {
- if e.kind() == ::std::io::ErrorKind::UnexpectedEof {
+impl From<io::Error> for DecodeError {
+ fn from(e: io::Error) -> Self {
+ if e.kind() == io::ErrorKind::UnexpectedEof {
DecodeError::ShortRead
} else {
DecodeError::Io(e.kind())
}
impl Writeable for OptionalField<Script> {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
match *self {
OptionalField::Present(ref script) => {
// Note that Writeable for script includes the 16-bit length tag for us
}
impl Writeable for OptionalField<u64> {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
match *self {
OptionalField::Present(ref value) => {
value.write(w)?;
});
impl Writeable for ChannelReestablish {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
w.size_hint(if let OptionalField::Present(..) = self.data_loss_protect { 32+2*8+33+32 } else { 32+2*8 });
self.channel_id.write(w)?;
self.next_local_commitment_number.write(w)?;
});
impl Writeable for Init {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
// global_features gets the bottom 13 bits of our features, and local_features gets all of
// our relevant feature bits. This keeps us compatible with old nodes.
self.features.write_up_to_13(w)?;
});
impl Writeable for OnionPacket {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
w.size_hint(1 + 33 + 20*65 + 32);
self.version.write(w)?;
match self.public_key {
});
impl Writeable for FinalOnionHopData {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
w.size_hint(32 + 8 - (self.total_msat.leading_zeros()/8) as usize);
self.payment_secret.0.write(w)?;
HighZeroBytesDroppedVarInt(self.total_msat).write(w)
}
impl Writeable for OnionHopData {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
w.size_hint(33);
// Note that this should never be reachable if Rust-Lightning generated the message, as we
// check values are sane long before we get here, though its possible in the future
}
impl Writeable for Ping {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
w.size_hint(self.byteslen as usize + 4);
self.ponglen.write(w)?;
vec![0u8; self.byteslen as usize].write(w)?; // size-unchecked write
}
impl Writeable for Pong {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
w.size_hint(self.byteslen as usize + 2);
vec![0u8; self.byteslen as usize].write(w)?; // size-unchecked write
Ok(())
}
impl Writeable for UnsignedChannelAnnouncement {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
w.size_hint(2 + 32 + 8 + 4*33 + self.features.byte_count() + self.excess_data.len());
self.features.write(w)?;
self.chain_hash.write(w)?;
node_id_2: Readable::read(r)?,
bitcoin_key_1: Readable::read(r)?,
bitcoin_key_2: Readable::read(r)?,
- excess_data: {
- let mut excess_data = vec![];
- r.read_to_end(&mut excess_data)?;
- excess_data
- },
+ excess_data: read_to_end(r)?,
})
}
}
});
impl Writeable for UnsignedChannelUpdate {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
let mut size = 64 + self.excess_data.len();
let mut message_flags: u8 = 0;
if let OptionalField::Present(_) = self.htlc_maximum_msat {
fee_base_msat: Readable::read(r)?,
fee_proportional_millionths: Readable::read(r)?,
htlc_maximum_msat: if has_htlc_maximum_msat { Readable::read(r)? } else { OptionalField::Absent },
- excess_data: {
- let mut excess_data = vec![];
- r.read_to_end(&mut excess_data)?;
- excess_data
- },
+ excess_data: read_to_end(r)?,
})
}
}
});
impl Writeable for ErrorMessage {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
w.size_hint(32 + 2 + self.data.len());
self.channel_id.write(w)?;
(self.data.len() as u16).write(w)?;
channel_id: Readable::read(r)?,
data: {
let mut sz: usize = <u16 as Readable>::read(r)? as usize;
- let mut data = vec![];
- let data_len = r.read_to_end(&mut data)?;
- sz = cmp::min(data_len, sz);
+ let data = read_to_end(r)?;
+ sz = cmp::min(data.len(), sz);
match String::from_utf8(data[..sz as usize].to_vec()) {
Ok(s) => s,
Err(_) => return Err(DecodeError::InvalidValue),
}
impl Writeable for UnsignedNodeAnnouncement {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
w.size_hint(76 + self.features.byte_count() + self.addresses.len()*38 + self.excess_address_data.len() + self.excess_data.len());
self.features.write(w)?;
self.timestamp.write(w)?;
}
Vec::new()
};
- r.read_to_end(&mut excess_data)?;
+ excess_data.extend(read_to_end(r)?.iter());
Ok(UnsignedNodeAnnouncement {
features,
timestamp,
}
impl Writeable for QueryShortChannelIds {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
// Calculated from 1-byte encoding_type plus 8-bytes per short_channel_id
let encoding_len: u16 = 1 + self.short_channel_ids.len() as u16 * 8;
}
impl Writeable for ReplyShortChannelIdsEnd {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
w.size_hint(32 + 1);
self.chain_hash.write(w)?;
self.full_information.write(w)?;
}
impl Writeable for QueryChannelRange {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
w.size_hint(32 + 4 + 4);
self.chain_hash.write(w)?;
self.first_blocknum.write(w)?;
}
impl Writeable for ReplyChannelRange {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
let encoding_len: u16 = 1 + self.short_channel_ids.len() as u16 * 8;
w.size_hint(32 + 4 + 4 + 1 + 2 + encoding_len as usize);
self.chain_hash.write(w)?;
}
impl Writeable for GossipTimestampFilter {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
w.size_hint(32 + 4 + 4);
self.chain_hash.write(w)?;
self.first_timestamp.write(w)?;
use bitcoin::secp256k1::key::{PublicKey,SecretKey};
use bitcoin::secp256k1::{Secp256k1, Message};
+ use io::Cursor;
use prelude::*;
- use std::io::Cursor;
#[test]
fn encoding_channel_reestablish_no_secret() {
use bitcoin::secp256k1::Secp256k1;
use bitcoin::secp256k1::key::SecretKey;
+use io;
use prelude::*;
use core::default::Default;
-use std::io;
use ln::functional_test_utils::*;
use bitcoin::secp256k1;
use prelude::*;
-use std::io::Cursor;
+use io::Cursor;
use core::convert::TryInto;
use core::ops::Deref;
#[cfg(test)]
mod tests {
+ use io;
use prelude::*;
use ln::PaymentHash;
use ln::features::{ChannelFeatures, NodeFeatures};
}
}
impl Writeable for RawOnionHopData {
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
writer.write_all(&self.data[..])
}
}
use routing::network_graph::NetGraphMsgHandler;
use prelude::*;
+use io;
use alloc::collections::LinkedList;
use alloc::fmt::Debug;
use sync::{Arc, Mutex};
use core::sync::atomic::{AtomicUsize, Ordering};
use core::{cmp, hash, fmt, mem};
use core::ops::Deref;
-use std::error;
+#[cfg(feature = "std")] use std::error;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256::HashEngine as Sha256Engine;
formatter.write_str("Peer Sent Invalid Data")
}
}
+
+#[cfg(feature = "std")]
impl error::Error for PeerHandleError {
fn description(&self) -> &str {
"Peer Sent Invalid Data"
peer.pending_read_buffer = [0; 18].to_vec();
peer.pending_read_is_header = true;
- let mut reader = ::std::io::Cursor::new(&msg_data[..]);
+ let mut reader = io::Cursor::new(&msg_data[..]);
let message_result = wire::read(&mut reader);
let message = match message_result {
Ok(x) => x,
return Err(PeerHandleError{ no_connection_possible: false }.into());
}
- log_info!(
- self.logger, "Received peer Init message: data_loss_protect: {}, initial_routing_sync: {}, upfront_shutdown_script: {}, gossip_queries: {}, static_remote_key: {}, unknown flags (local and global): {}",
- if msg.features.supports_data_loss_protect() { "supported" } else { "not supported"},
- if msg.features.initial_routing_sync() { "requested" } else { "not requested" },
- if msg.features.supports_upfront_shutdown_script() { "supported" } else { "not supported"},
- if msg.features.supports_gossip_queries() { "supported" } else { "not supported" },
- if msg.features.supports_static_remote_key() { "supported" } else { "not supported"},
- if msg.features.supports_unknown_bits() { "present" } else { "none" }
- );
+ log_info!(self.logger, "Received peer Init message: {}", msg.features);
if msg.features.initial_routing_sync() {
peer.sync_status = InitSyncTracker::ChannelsSyncing(0);
//! Further functional tests which test blockchain reorganizations.
use chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor};
+use chain::transaction::OutPoint;
use chain::{Confirm, Watch};
use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs};
use ln::features::InitFeatures;
use util::ser::{ReadableArgs, Writeable};
use bitcoin::blockdata::block::{Block, BlockHeader};
+use bitcoin::blockdata::script::Builder;
+use bitcoin::blockdata::opcodes;
use bitcoin::hash_types::BlockHash;
+use bitcoin::secp256k1::Secp256k1;
use prelude::*;
use core::mem;
node_txn.clear();
}
}
+
+fn do_test_to_remote_after_local_detection(style: ConnectStyle) {
+	// In previous code, detection of to_remote outputs in a counterparty commitment transaction
+	// was dependent on whether a local commitment transaction had been seen on-chain previously.
+	// This resulted in some edge cases around not being able to generate a SpendableOutput event
+	// after a reorg.
+	//
+	// Here, we test this by first confirming one set of commitment transactions, then
+	// disconnecting them and reconnecting another. We then confirm them and check that the correct
+	// SpendableOutput event is generated.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	// Apply the block-connection style under test to both nodes.
+	*nodes[0].connect_style.borrow_mut() = style;
+	*nodes[1].connect_style.borrow_mut() = style;
+
+	let (_, _, chan_id, funding_tx) =
+		create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 100_000_000, InitFeatures::known(), InitFeatures::known());
+	let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
+	assert_eq!(funding_outpoint.to_channel_id(), chan_id);
+
+	// Each node's own ("local") commitment transaction; from the peer's perspective these
+	// are the counterparty ("remote") commitments, hence the variable naming.
+	let remote_txn_a = get_local_commitment_txn!(nodes[0], chan_id);
+	let remote_txn_b = get_local_commitment_txn!(nodes[1], chan_id);
+
+	// Confirm node A's commitment transaction on both nodes, force-closing the channel.
+	mine_transaction(&nodes[0], &remote_txn_a[0]);
+	mine_transaction(&nodes[1], &remote_txn_a[0]);
+
+	assert!(nodes[0].node.list_channels().is_empty());
+	check_closed_broadcast!(nodes[0], true);
+	check_added_monitors!(nodes[0], 1);
+	assert!(nodes[1].node.list_channels().is_empty());
+	check_closed_broadcast!(nodes[1], true);
+	check_added_monitors!(nodes[1], 1);
+
+	// Drop transactions broadcasted in response to the first commitment transaction (we have good
+	// test coverage of these things already elsewhere).
+	assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
+	assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
+
+	assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+	assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+
+	// Reorg node A's commitment transaction back out with a one-block disconnect.
+	disconnect_blocks(&nodes[0], 1);
+	disconnect_blocks(&nodes[1], 1);
+
+	assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+	assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+	assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+	assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+
+	// Even after ANTI_REORG_DELAY - 1 further blocks nothing should surface, since the
+	// confirmed commitment transaction was disconnected above.
+	connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
+	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
+
+	assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+	assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+	assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+	assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+
+	// Now confirm the *other* node's commitment transaction instead.
+	mine_transaction(&nodes[0], &remote_txn_b[0]);
+	mine_transaction(&nodes[1], &remote_txn_b[0]);
+
+	assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+	assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+	assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+	assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+
+	// Once node B's commitment reaches ANTI_REORG_DELAY confirmations, node A should see its
+	// to_remote output as spendable despite never having confirmed its own commitment.
+	connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
+	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
+
+	let mut node_a_spendable = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events();
+	assert_eq!(node_a_spendable.len(), 1);
+	// NOTE(review): if the event is not SpendableOutputs this if-let silently skips the inner
+	// assertions; consider adding an `else { panic!(...) }` arm to fail loudly.
+	if let Event::SpendableOutputs { outputs } = node_a_spendable.pop().unwrap() {
+		assert_eq!(outputs.len(), 1);
+		let spend_tx = nodes[0].keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(),
+			Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &Secp256k1::new()).unwrap();
+		check_spends!(spend_tx, remote_txn_b[0]);
+	}
+
+	// nodes[1] is waiting for the to_self_delay to expire, which is many more than
+	// ANTI_REORG_DELAY. Instead, walk it back and confirm the original remote_txn_a commitment
+	// again and check that nodes[1] generates a similar spendable output.
+	// Technically a reorg of ANTI_REORG_DELAY violates our assumptions, so this is undefined by
+	// our API spec, but we currently handle this correctly and there's little reason we shouldn't
+	// in the future.
+	assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+	disconnect_blocks(&nodes[1], ANTI_REORG_DELAY);
+	mine_transaction(&nodes[1], &remote_txn_a[0]);
+	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
+
+	let mut node_b_spendable = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events();
+	assert_eq!(node_b_spendable.len(), 1);
+	// NOTE(review): same silent-skip caveat as the node A check above.
+	if let Event::SpendableOutputs { outputs } = node_b_spendable.pop().unwrap() {
+		assert_eq!(outputs.len(), 1);
+		let spend_tx = nodes[1].keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(),
+			Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &Secp256k1::new()).unwrap();
+		check_spends!(spend_tx, remote_txn_a[0]);
+	}
+}
+
+// Exercise do_test_to_remote_after_local_detection under every block-connection style, as
+// the behavior being regression-tested depends on how blocks/transactions are delivered.
+#[test]
+fn test_to_remote_after_local_detection() {
+	do_test_to_remote_after_local_detection(ConnectStyle::BestBlockFirst);
+	do_test_to_remote_after_local_detection(ConnectStyle::BestBlockFirstSkippingBlocks);
+	do_test_to_remote_after_local_detection(ConnectStyle::TransactionsFirst);
+	do_test_to_remote_after_local_detection(ConnectStyle::TransactionsFirstSkippingBlocks);
+	do_test_to_remote_after_local_detection(ConnectStyle::FullBlockViaListen);
+}
//!
//! [BOLT #1]: https://github.com/lightningnetwork/lightning-rfc/blob/master/01-messaging.md
+use io;
use ln::msgs;
use util::ser::{Readable, Writeable, Writer};
/// # Errors
///
/// Returns an error if the message payload code not be decoded as the specified type.
-pub fn read<R: ::std::io::Read>(buffer: &mut R) -> Result<Message, msgs::DecodeError> {
+pub fn read<R: io::Read>(buffer: &mut R) -> Result<Message, msgs::DecodeError> {
let message_type = <u16 as Readable>::read(buffer)?;
match message_type {
msgs::Init::TYPE => {
/// # Errors
///
/// Returns an I/O error if the write could not be completed.
-pub fn write<M: Encode + Writeable, W: Writer>(message: &M, buffer: &mut W) -> Result<(), ::std::io::Error> {
+pub fn write<M: Encode + Writeable, W: Writer>(message: &M, buffer: &mut W) -> Result<(), io::Error> {
M::TYPE.write(buffer)?;
message.write(buffer)
}
#[test]
fn read_empty_buffer() {
let buffer = [];
- let mut reader = ::std::io::Cursor::new(buffer);
+ let mut reader = io::Cursor::new(buffer);
assert!(read(&mut reader).is_err());
}
#[test]
fn read_incomplete_type() {
let buffer = &ENCODED_PONG[..1];
- let mut reader = ::std::io::Cursor::new(buffer);
+ let mut reader = io::Cursor::new(buffer);
assert!(read(&mut reader).is_err());
}
#[test]
fn read_empty_payload() {
let buffer = &ENCODED_PONG[..2];
- let mut reader = ::std::io::Cursor::new(buffer);
+ let mut reader = io::Cursor::new(buffer);
assert!(read(&mut reader).is_err());
}
#[test]
fn read_invalid_message() {
let buffer = &ENCODED_PONG[..4];
- let mut reader = ::std::io::Cursor::new(buffer);
+ let mut reader = io::Cursor::new(buffer);
assert!(read(&mut reader).is_err());
}
#[test]
fn read_known_message() {
let buffer = &ENCODED_PONG[..];
- let mut reader = ::std::io::Cursor::new(buffer);
+ let mut reader = io::Cursor::new(buffer);
let message = read(&mut reader).unwrap();
match message {
Message::Pong(_) => (),
#[test]
fn read_unknown_message() {
let buffer = &::core::u16::MAX.to_be_bytes();
- let mut reader = ::std::io::Cursor::new(buffer);
+ let mut reader = io::Cursor::new(buffer);
let message = read(&mut reader).unwrap();
match message {
Message::Unknown(MessageType(::core::u16::MAX)) => (),
let mut buffer = Vec::new();
assert!(write(&message, &mut buffer).is_ok());
- let mut reader = ::std::io::Cursor::new(buffer);
+ let mut reader = io::Cursor::new(buffer);
let decoded_message = read(&mut reader).unwrap();
match decoded_message {
Message::Pong(msgs::Pong { byteslen: 2u16 }) => (),
}
fn check_init_msg(buffer: Vec<u8>, expect_unknown: bool) {
- let mut reader = ::std::io::Cursor::new(buffer);
+ let mut reader = io::Cursor::new(buffer);
let decoded_msg = read(&mut reader).unwrap();
match decoded_msg {
Message::Init(msgs::Init { features }) => {
fn read_lnd_node_announcement() {
// Taken from lnd v0.9.0-beta.
let buffer = vec![1, 1, 91, 164, 146, 213, 213, 165, 21, 227, 102, 33, 105, 179, 214, 21, 221, 175, 228, 93, 57, 177, 191, 127, 107, 229, 31, 50, 21, 81, 179, 71, 39, 18, 35, 2, 89, 224, 110, 123, 66, 39, 148, 246, 177, 85, 12, 19, 70, 226, 173, 132, 156, 26, 122, 146, 71, 213, 247, 48, 93, 190, 185, 177, 12, 172, 0, 3, 2, 162, 161, 94, 103, 195, 37, 2, 37, 242, 97, 140, 2, 111, 69, 85, 39, 118, 30, 221, 99, 254, 120, 49, 103, 22, 170, 227, 111, 172, 164, 160, 49, 68, 138, 116, 16, 22, 206, 107, 51, 153, 255, 97, 108, 105, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 1, 172, 21, 0, 2, 38, 7];
- let mut reader = ::std::io::Cursor::new(buffer);
+ let mut reader = io::Cursor::new(buffer);
let decoded_msg = read(&mut reader).unwrap();
match decoded_msg {
Message::NodeAnnouncement(msgs::NodeAnnouncement { contents: msgs::UnsignedNodeAnnouncement { features, ..}, ..}) => {
fn read_lnd_chan_announcement() {
// Taken from lnd v0.9.0-beta.
let buffer = vec![1, 0, 82, 238, 153, 33, 128, 87, 215, 2, 28, 241, 140, 250, 98, 255, 56, 5, 79, 240, 214, 231, 172, 35, 240, 171, 44, 9, 78, 91, 8, 193, 102, 5, 17, 178, 142, 106, 180, 183, 46, 38, 217, 212, 25, 236, 69, 47, 92, 217, 181, 221, 161, 205, 121, 201, 99, 38, 158, 216, 186, 193, 230, 86, 222, 6, 206, 67, 22, 255, 137, 212, 141, 161, 62, 134, 76, 48, 241, 54, 50, 167, 187, 247, 73, 27, 74, 1, 129, 185, 197, 153, 38, 90, 255, 138, 39, 161, 102, 172, 213, 74, 107, 88, 150, 90, 0, 49, 104, 7, 182, 184, 194, 219, 181, 172, 8, 245, 65, 226, 19, 228, 101, 145, 25, 159, 52, 31, 58, 93, 53, 59, 218, 91, 37, 84, 103, 17, 74, 133, 33, 35, 2, 203, 101, 73, 19, 94, 175, 122, 46, 224, 47, 168, 128, 128, 25, 26, 25, 214, 52, 247, 43, 241, 117, 52, 206, 94, 135, 156, 52, 164, 143, 234, 58, 185, 50, 185, 140, 198, 174, 71, 65, 18, 105, 70, 131, 172, 137, 0, 164, 51, 215, 143, 117, 119, 217, 241, 197, 177, 227, 227, 170, 199, 114, 7, 218, 12, 107, 30, 191, 236, 203, 21, 61, 242, 48, 192, 90, 233, 200, 199, 111, 162, 68, 234, 54, 219, 1, 233, 66, 5, 82, 74, 84, 211, 95, 199, 245, 202, 89, 223, 102, 124, 62, 166, 253, 253, 90, 180, 118, 21, 61, 110, 37, 5, 96, 167, 0, 0, 6, 34, 110, 70, 17, 26, 11, 89, 202, 175, 18, 96, 67, 235, 91, 191, 40, 195, 79, 58, 94, 51, 42, 31, 199, 178, 183, 60, 241, 136, 145, 15, 0, 2, 65, 0, 0, 1, 0, 0, 2, 37, 242, 97, 140, 2, 111, 69, 85, 39, 118, 30, 221, 99, 254, 120, 49, 103, 22, 170, 227, 111, 172, 164, 160, 49, 68, 138, 116, 16, 22, 206, 107, 3, 54, 61, 144, 88, 171, 247, 136, 208, 99, 9, 135, 37, 201, 178, 253, 136, 0, 185, 235, 68, 160, 106, 110, 12, 46, 21, 125, 204, 18, 75, 234, 16, 3, 42, 171, 28, 52, 224, 11, 30, 30, 253, 156, 148, 175, 203, 121, 250, 111, 122, 195, 84, 122, 77, 183, 56, 135, 101, 88, 41, 60, 191, 99, 232, 85, 2, 36, 17, 156, 11, 8, 12, 189, 177, 68, 88, 28, 15, 207, 21, 179, 151, 56, 226, 158, 148, 3, 120, 113, 177, 243, 184, 17, 173, 37, 46, 222, 16];
- let mut reader = ::std::io::Cursor::new(buffer);
+ let mut reader = io::Cursor::new(buffer);
let decoded_msg = read(&mut reader).unwrap();
match decoded_msg {
Message::ChannelAnnouncement(msgs::ChannelAnnouncement { contents: msgs::UnsignedChannelAnnouncement { features, ..}, ..}) => {
use util::events::{MessageSendEvent, MessageSendEventsProvider};
use util::scid_utils::{block_from_scid, scid_from_parts, MAX_SCID_BLOCK};
+use io;
use prelude::*;
use alloc::collections::{BTreeMap, btree_map::Entry as BtreeEntry};
use core::{cmp, fmt};
const MIN_SERIALIZATION_VERSION: u8 = 1;
impl Writeable for NetworkGraph {
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
self.genesis_hash.write(writer)?;
}
impl Readable for NetworkGraph {
- fn read<R: ::std::io::Read>(reader: &mut R) -> Result<NetworkGraph, DecodeError> {
+ fn read<R: io::Read>(reader: &mut R) -> Result<NetworkGraph, DecodeError> {
let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
let genesis_hash: BlockHash = Readable::read(reader)?;
use bitcoin::secp256k1::key::{PublicKey, SecretKey};
use bitcoin::secp256k1::{All, Secp256k1};
+ use io;
use prelude::*;
use sync::Arc;
assert!(!network.get_nodes().is_empty());
assert!(!network.get_channels().is_empty());
network.write(&mut w).unwrap();
- assert!(<NetworkGraph>::read(&mut ::std::io::Cursor::new(&w.0)).unwrap() == *network);
+ assert!(<NetworkGraph>::read(&mut io::Cursor::new(&w.0)).unwrap() == *network);
}
#[test]
use util::ser::{Writeable, Readable};
use util::logger::Logger;
+use io;
use prelude::*;
use alloc::collections::BinaryHeap;
use core::cmp;
const MIN_SERIALIZATION_VERSION: u8 = 1;
impl Writeable for Route {
- fn write<W: ::util::ser::Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: ::util::ser::Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
(self.paths.len() as u64).write(writer)?;
for hops in self.paths.iter() {
}
impl Readable for Route {
- fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Route, DecodeError> {
+ fn read<R: io::Read>(reader: &mut R) -> Result<Route, DecodeError> {
let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
let path_count: u64 = Readable::read(reader)?;
let mut paths = Vec::with_capacity(cmp::min(path_count, 128) as usize);
}
}
- #[cfg(not(feature = "no_std"))]
+ #[cfg(not(feature = "no-std"))]
pub(super) fn random_init_seed() -> u64 {
// Because the default HashMap in std pulls OS randomness, we can use it as a (bad) RNG.
use core::hash::{BuildHasher, Hasher};
println!("Using seed of {}", seed);
seed
}
- #[cfg(not(feature = "no_std"))]
+ #[cfg(not(feature = "no-std"))]
use util::ser::Readable;
#[test]
- #[cfg(not(feature = "no_std"))]
+ #[cfg(not(feature = "no-std"))]
fn generate_routes() {
let mut d = match super::test_utils::get_route_file() {
Ok(f) => f,
}
#[test]
- #[cfg(not(feature = "no_std"))]
+ #[cfg(not(feature = "no-std"))]
fn generate_routes_mpp() {
let mut d = match super::test_utils::get_route_file() {
Ok(f) => f,
}
}
-#[cfg(all(test, not(feature = "no_std")))]
+#[cfg(all(test, not(feature = "no-std")))]
pub(crate) mod test_utils {
use std::fs::File;
/// Tries to open a network graph file, or panics with a URL to fetch it.
}
}
-#[cfg(all(test, feature = "unstable", not(feature = "no_std")))]
+#[cfg(all(test, feature = "unstable", not(feature = "no-std")))]
mod benches {
use super::*;
use util::logger::{Logger, Record};
// You may not use this file except in accordance with one or both of these
// licenses.
-use std::io;
+use io;
#[cfg(not(feature = "fuzztarget"))]
mod real_chacha {
use ln::{chan_utils, msgs};
use chain::keysinterface::{Sign, InMemorySigner, BaseSign};
+use io;
use prelude::*;
use core::cmp;
use sync::{Mutex, Arc};
use bitcoin::secp256k1::key::{SecretKey, PublicKey};
use bitcoin::secp256k1::{Secp256k1, Signature};
use util::ser::{Writeable, Writer, Readable};
-use std::io::Error;
+use io::Error;
use ln::msgs::DecodeError;
/// Initial value for revoked commitment downward counter
}
impl Readable for EnforcingSigner {
- fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+ fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
let inner = Readable::read(reader)?;
let last_commitment_number = Readable::read(reader)?;
Ok(EnforcingSigner {
use bitcoin::secp256k1::key::PublicKey;
+use io;
use prelude::*;
use core::time::Duration;
use core::ops::Deref;
}
impl Writeable for Event {
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
match self {
&Event::FundingGenerationReady { .. } => {
0u8.write(writer)?;
}
}
impl MaybeReadable for Event {
- fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Option<Self>, msgs::DecodeError> {
+ fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, msgs::DecodeError> {
match Readable::read(reader)? {
0u8 => Ok(None),
1u8 => {
//! as ChannelsManagers and ChannelMonitors.
use prelude::*;
-use std::io::{Read, Write};
+use io::{self, Read, Write};
+use io_extras::{copy, sink};
use core::hash::Hash;
use sync::Mutex;
use core::cmp;
/// (C-not exported) as we only export serialization to/from byte arrays instead
pub trait Writer {
/// Writes the given buf out. See std::io::Write::write_all for more
- fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error>;
+ fn write_all(&mut self, buf: &[u8]) -> Result<(), io::Error>;
	/// Hints that data of the given size is about to be written. This may not always be called
/// prior to data being written and may be safely ignored.
fn size_hint(&mut self, size: usize);
impl<W: Write> Writer for W {
#[inline]
- fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
- <Self as ::std::io::Write>::write_all(self, buf)
+ fn write_all(&mut self, buf: &[u8]) -> Result<(), io::Error> {
+ <Self as io::Write>::write_all(self, buf)
}
#[inline]
fn size_hint(&mut self, _size: usize) { }
pub(crate) struct WriterWriteAdaptor<'a, W: Writer + 'a>(pub &'a mut W);
impl<'a, W: Writer + 'a> Write for WriterWriteAdaptor<'a, W> {
#[inline]
- fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
+ fn write_all(&mut self, buf: &[u8]) -> Result<(), io::Error> {
self.0.write_all(buf)
}
#[inline]
- fn write(&mut self, buf: &[u8]) -> Result<usize, ::std::io::Error> {
+ fn write(&mut self, buf: &[u8]) -> Result<usize, io::Error> {
self.0.write_all(buf)?;
Ok(buf.len())
}
#[inline]
- fn flush(&mut self) -> Result<(), ::std::io::Error> {
+ fn flush(&mut self) -> Result<(), io::Error> {
Ok(())
}
}
pub(crate) struct VecWriter(pub Vec<u8>);
impl Writer for VecWriter {
#[inline]
- fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
+ fn write_all(&mut self, buf: &[u8]) -> Result<(), io::Error> {
self.0.extend_from_slice(buf);
Ok(())
}
pub(crate) struct LengthCalculatingWriter(pub usize);
impl Writer for LengthCalculatingWriter {
#[inline]
- fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
+ fn write_all(&mut self, buf: &[u8]) -> Result<(), io::Error> {
self.0 += buf.len();
Ok(())
}
#[inline]
pub fn eat_remaining(&mut self) -> Result<(), DecodeError> {
- ::std::io::copy(self, &mut ::std::io::sink()).unwrap();
+ copy(self, &mut sink()).unwrap();
if self.bytes_read != self.total_bytes {
Err(DecodeError::ShortRead)
} else {
}
impl<R: Read> Read for FixedLengthReader<R> {
#[inline]
- fn read(&mut self, dest: &mut [u8]) -> Result<usize, ::std::io::Error> {
+ fn read(&mut self, dest: &mut [u8]) -> Result<usize, io::Error> {
if self.total_bytes == self.bytes_read {
Ok(0)
} else {
}
impl<R: Read> Read for ReadTrackingReader<R> {
#[inline]
- fn read(&mut self, dest: &mut [u8]) -> Result<usize, ::std::io::Error> {
+ fn read(&mut self, dest: &mut [u8]) -> Result<usize, io::Error> {
match self.read.read(dest) {
Ok(0) => Ok(0),
Ok(len) => {
/// (C-not exported) as we only export serialization to/from byte arrays instead
pub trait Writeable {
/// Writes self out to the given Writer
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error>;
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error>;
/// Writes self out to a Vec<u8>
fn encode(&self) -> Vec<u8> {
}
impl<'a, T: Writeable> Writeable for &'a T {
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> { (*self).write(writer) }
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> { (*self).write(writer) }
}
/// A trait that various rust-lightning types implement allowing them to be read in from a Read
pub(crate) struct VecWriteWrapper<'a, T: Writeable>(pub &'a Vec<T>);
impl<'a, T: Writeable> Writeable for VecWriteWrapper<'a, T> {
#[inline]
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
for ref v in self.0.iter() {
v.write(writer)?;
}
pub(crate) struct U48(pub u64);
impl Writeable for U48 {
#[inline]
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
writer.write_all(&be48_to_array(self.0))
}
}
pub(crate) struct BigSize(pub u64);
impl Writeable for BigSize {
#[inline]
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
match self.0 {
0...0xFC => {
(self.0 as u8).write(writer)
($val_type:ty, $len: expr) => {
impl Writeable for $val_type {
#[inline]
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
writer.write_all(&self.to_be_bytes())
}
}
impl Writeable for HighZeroBytesDroppedVarInt<$val_type> {
#[inline]
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
// Skip any full leading 0 bytes when writing (in BE):
writer.write_all(&self.0.to_be_bytes()[(self.0.leading_zeros()/8) as usize..$len])
}
impl Writeable for u8 {
#[inline]
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
writer.write_all(&[*self])
}
}
impl Writeable for bool {
#[inline]
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
writer.write_all(&[if *self {1} else {0}])
}
}
impl Writeable for [u8; $size]
{
#[inline]
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
w.write_all(self)
}
}
V: Writeable
{
#[inline]
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
(self.len() as u16).write(w)?;
for (key, value) in self.iter() {
key.write(w)?;
// Vectors
impl Writeable for Vec<u8> {
#[inline]
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
(self.len() as u16).write(w)?;
w.write_all(&self)
}
}
impl Writeable for Vec<Signature> {
#[inline]
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
(self.len() as u16).write(w)?;
for e in self.iter() {
e.write(w)?;
}
impl Writeable for Script {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
(self.len() as u16).write(w)?;
w.write_all(self.as_bytes())
}
}
impl Writeable for PublicKey {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
self.serialize().write(w)
}
#[inline]
}
impl Writeable for SecretKey {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
let mut ser = [0; SECRET_KEY_SIZE];
ser.copy_from_slice(&self[..]);
ser.write(w)
}
impl Writeable for Sha256dHash {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
w.write_all(&self[..])
}
}
}
impl Writeable for Signature {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
self.serialize_compact().write(w)
}
#[inline]
}
impl Writeable for PaymentPreimage {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
self.0.write(w)
}
}
}
impl Writeable for PaymentHash {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
self.0.write(w)
}
}
}
impl Writeable for PaymentSecret {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
self.0.write(w)
}
}
}
impl<T: Writeable> Writeable for Box<T> {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
T::write(&**self, w)
}
}
}
impl<T: Writeable> Writeable for Option<T> {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
match *self {
None => 0u8.write(w)?,
Some(ref data) => {
}
impl Writeable for Txid {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
w.write_all(&self[..])
}
}
}
impl Writeable for BlockHash {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
w.write_all(&self[..])
}
}
}
impl Writeable for OutPoint {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
self.txid.write(w)?;
self.vout.write(w)?;
Ok(())
macro_rules! impl_consensus_ser {
($bitcoin_type: ty) => {
impl Writeable for $bitcoin_type {
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
match self.consensus_encode(WriterWriteAdaptor(writer)) {
Ok(_) => Ok(()),
Err(e) => Err(e),
fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
match consensus::encode::Decodable::consensus_decode(r) {
Ok(t) => Ok(t),
- Err(consensus::encode::Error::Io(ref e)) if e.kind() == ::std::io::ErrorKind::UnexpectedEof => Err(DecodeError::ShortRead),
+ Err(consensus::encode::Error::Io(ref e)) if e.kind() == io::ErrorKind::UnexpectedEof => Err(DecodeError::ShortRead),
Err(consensus::encode::Error::Io(e)) => Err(DecodeError::Io(e.kind())),
Err(_) => Err(DecodeError::InvalidValue),
}
}
}
impl<T: Writeable> Writeable for Mutex<T> {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
self.lock().unwrap().write(w)
}
}
}
}
impl<A: Writeable, B: Writeable> Writeable for (A, B) {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
self.0.write(w)?;
self.1.write(w)
}
}
}
impl<A: Writeable, B: Writeable, C: Writeable> Writeable for (A, B, C) {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
self.0.write(w)?;
self.1.write(w)?;
self.2.write(w)
#[allow(unused_comparisons)] // Note that $type may be 0 making the second comparison always true
let invalid_order = ($last_seen_type.is_none() || $last_seen_type.unwrap() < $type) && $typ.0 > $type;
if invalid_order {
- Err(DecodeError::InvalidValue)?
+ return Err(DecodeError::InvalidValue);
}
}};
($last_seen_type: expr, $typ: expr, $type: expr, option) => {{
#[allow(unused_comparisons)] // Note that $type may be 0 making the second comparison always true
let missing_req_type = $last_seen_type.is_none() || $last_seen_type.unwrap() < $type;
if missing_req_type {
- Err(DecodeError::InvalidValue)?
+ return Err(DecodeError::InvalidValue);
}
}};
($last_seen_type: expr, $type: expr, vec_type) => {{
match ser::Readable::read(&mut tracking_reader) {
Err(DecodeError::ShortRead) => {
if !tracking_reader.have_read {
- break 'tlv_read
+ break 'tlv_read;
} else {
- Err(DecodeError::ShortRead)?
+ return Err(DecodeError::ShortRead);
}
},
- Err(e) => Err(e)?,
+ Err(e) => return Err(e),
Ok(t) => t,
}
};
// Types must be unique and monotonically increasing:
match last_seen_type {
Some(t) if typ.0 <= t => {
- Err(DecodeError::InvalidValue)?
+ return Err(DecodeError::InvalidValue);
},
_ => {},
}
decode_tlv!(s, $field, $fieldty);
if s.bytes_remain() {
s.eat_remaining()?; // Return ShortRead if there's actually not enough bytes
- Err(DecodeError::InvalidValue)?
+ return Err(DecodeError::InvalidValue);
}
},)*
x if x % 2 == 0 => {
- Err(DecodeError::UnknownRequiredFeature)?
+ return Err(DecodeError::UnknownRequiredFeature);
},
_ => {},
}
macro_rules! impl_writeable {
($st:ident, $len: expr, {$($field:ident),*}) => {
impl ::util::ser::Writeable for $st {
- fn write<W: ::util::ser::Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: ::util::ser::Writer>(&self, w: &mut W) -> Result<(), $crate::io::Error> {
if $len != 0 {
w.size_hint($len);
}
}
impl ::util::ser::Readable for $st {
- fn read<R: ::std::io::Read>(r: &mut R) -> Result<Self, ::ln::msgs::DecodeError> {
+ fn read<R: $crate::io::Read>(r: &mut R) -> Result<Self, ::ln::msgs::DecodeError> {
Ok(Self {
$($field: ::util::ser::Readable::read(r)?),*
})
macro_rules! impl_writeable_len_match {
($struct: ident, $cmp: tt, ($calc_len: expr), {$({$match: pat, $length: expr}),*}, {$($field:ident),*}) => {
impl Writeable for $struct {
- fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), $crate::io::Error> {
let len = match *self {
$($match => $length,)*
};
}
impl ::util::ser::Readable for $struct {
- fn read<R: ::std::io::Read>(r: &mut R) -> Result<Self, DecodeError> {
+ fn read<R: $crate::io::Read>(r: &mut R) -> Result<Self, DecodeError> {
Ok(Self {
$($field: Readable::read(r)?),*
})
macro_rules! impl_writeable_tlv_based {
($st: ident, {$(($type: expr, $field: ident, $fieldty: ident)),* $(,)*}) => {
impl ::util::ser::Writeable for $st {
- fn write<W: ::util::ser::Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: ::util::ser::Writer>(&self, writer: &mut W) -> Result<(), $crate::io::Error> {
write_tlv_fields!(writer, {
$(($type, self.$field, $fieldty)),*
});
}
impl ::util::ser::Readable for $st {
- fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, ::ln::msgs::DecodeError> {
+ fn read<R: $crate::io::Read>(reader: &mut R) -> Result<Self, ::ln::msgs::DecodeError> {
$(
init_tlv_field_var!($field, $fieldty);
)*
),* $(,)*;
$(($tuple_variant_id: expr, $tuple_variant_name: ident)),* $(,)*) => {
impl ::util::ser::Writeable for $st {
- fn write<W: ::util::ser::Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: ::util::ser::Writer>(&self, writer: &mut W) -> Result<(), $crate::io::Error> {
match self {
$($st::$variant_name { $(ref $field),* } => {
let id: u8 = $variant_id;
}
impl ::util::ser::Readable for $st {
- fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, ::ln::msgs::DecodeError> {
+ fn read<R: $crate::io::Read>(reader: &mut R) -> Result<Self, ::ln::msgs::DecodeError> {
let id: u8 = ::util::ser::Readable::read(reader)?;
match id {
$($variant_id => {
Ok($st::$tuple_variant_name(Readable::read(reader)?))
}),*
_ => {
- Err(DecodeError::UnknownRequiredFeature)?
+ Err(DecodeError::UnknownRequiredFeature)
},
}
}
#[cfg(test)]
mod tests {
+ use io::{self, Cursor};
use prelude::*;
- use std::io::Cursor;
use ln::msgs::DecodeError;
use util::ser::{Writeable, HighZeroBytesDroppedVarInt, VecWriter};
use bitcoin::secp256k1::PublicKey;
do_test!(concat!("fd00fe", "02", "0226"), None, None, None, Some(550));
}
- fn do_simple_test_tlv_write() -> Result<(), ::std::io::Error> {
+ fn do_simple_test_tlv_write() -> Result<(), io::Error> {
let mut stream = VecWriter(Vec::new());
stream.0.clear();
use regex;
+use io;
use prelude::*;
use core::time::Duration;
use sync::{Mutex, Arc};
pub struct TestVecWriter(pub Vec<u8>);
impl Writer for TestVecWriter {
- fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
+ fn write_all(&mut self, buf: &[u8]) -> Result<(), io::Error> {
self.0.extend_from_slice(buf);
Ok(())
}
fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
fn read_chan_signer(&self, reader: &[u8]) -> Result<Self::Signer, msgs::DecodeError> {
- EnforcingSigner::read(&mut std::io::Cursor::new(reader))
+ EnforcingSigner::read(&mut io::Cursor::new(reader))
}
fn sign_invoice(&self, _invoice_preimage: Vec<u8>) -> Result<RecoverableSignature, ()> { unreachable!(); }
}
let mut w = TestVecWriter(Vec::new());
monitor.write(&mut w).unwrap();
let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
- &mut ::std::io::Cursor::new(&w.0), self.keys_manager).unwrap().1;
+ &mut io::Cursor::new(&w.0), self.keys_manager).unwrap().1;
assert!(new_monitor == monitor);
self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(), (funding_txo, monitor.get_latest_update_id()));
self.added_monitors.lock().unwrap().push((funding_txo, monitor));
let mut w = TestVecWriter(Vec::new());
update.write(&mut w).unwrap();
assert!(channelmonitor::ChannelMonitorUpdate::read(
- &mut ::std::io::Cursor::new(&w.0)).unwrap() == update);
+ &mut io::Cursor::new(&w.0)).unwrap() == update);
if let Some(exp) = self.expect_channel_force_closed.lock().unwrap().take() {
assert_eq!(funding_txo.to_channel_id(), exp.0);
w.0.clear();
monitor.write(&mut w).unwrap();
let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
- &mut ::std::io::Cursor::new(&w.0), self.keys_manager).unwrap().1;
+ &mut io::Cursor::new(&w.0), self.keys_manager).unwrap().1;
assert!(new_monitor == *monitor);
self.added_monitors.lock().unwrap().push((funding_txo, new_monitor));
}
fn read_chan_signer(&self, buffer: &[u8]) -> Result<Self::Signer, msgs::DecodeError> {
- let mut reader = std::io::Cursor::new(buffer);
+ let mut reader = io::Cursor::new(buffer);
let inner: InMemorySigner = Readable::read(&mut reader)?;
let revoked_commitment = self.make_revoked_commitment_cell(inner.commitment_seed);
use ln::msgs::MAX_VALUE_MSAT;
use prelude::*;
+use io_extras::sink;
use core::cmp::Ordering;
pub fn sort_outputs<T, C : Fn(&T, &T) -> Ordering>(outputs: &mut Vec<(TxOut, T)>, tie_breaker: C) {
script_pubkey: change_destination_script,
value: 0,
};
- let change_len = change_output.consensus_encode(&mut std::io::sink()).unwrap();
+ let change_len = change_output.consensus_encode(&mut sink()).unwrap();
let mut weight_with_change: i64 = tx.get_weight() as i64 + 2 + witness_max_weight as i64 + change_len as i64 * 4;
// Include any extra bytes required to push an extra output.
weight_with_change += (VarInt(tx.output.len() as u64 + 1).len() - VarInt(tx.output.len() as u64).len()) as i64 * 4;