use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, OptionalField};
use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner};
use util::config::UserConfig;
-use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
+use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
use util::{byte_utils, events};
use util::ser::{Readable, ReadableArgs, MaybeReadable, Writeable, Writer};
use util::chacha20::{ChaCha20, ChaChaReader};
use util::logger::Logger;
use util::errors::APIError;
+use prelude::*;
use core::{cmp, mem};
+use core::cell::RefCell;
use std::collections::{HashMap, hash_map, HashSet};
use std::io::{Cursor, Read};
use std::sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard};
/// Note that this includes RBF or similar transaction replacement strategies - lightning does
/// not currently support replacing a funding transaction on an existing channel. Instead,
/// create a new channel with a conflicting funding transaction.
+ ///
+ /// [`Event::FundingGenerationReady`]: crate::util::events::Event::FundingGenerationReady
pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_transaction: Transaction) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
// smaller than 500:
const STATIC_ASSERT: u32 = Self::HALF_MESSAGE_IS_ADDRS - 500;
- /// Generates a signed node_announcement from the given arguments and creates a
- /// BroadcastNodeAnnouncement event. Note that such messages will be ignored unless peers have
- /// seen a channel_announcement from us (ie unless we have public channels open).
+ /// Regenerates channel_announcements and generates a signed node_announcement from the given
+ /// arguments, providing them in corresponding events via
+ /// [`get_and_clear_pending_msg_events`], if at least one public channel has been confirmed
+ /// on-chain. This effectively re-broadcasts all channel announcements and sends our node
+ /// announcement to ensure that the lightning P2P network is aware of the channels we have and
+ /// our network addresses.
+ ///
+ /// `rgb` is a node "color" and `alias` is a printable human-readable string to describe this
+ /// node to humans. They carry no in-protocol meaning.
///
- /// RGB is a node "color" and alias is a printable human-readable string to describe this node
- /// to humans. They carry no in-protocol meaning.
+ /// `addresses` represent the set (possibly empty) of socket addresses on which this node
+ /// accepts incoming connections. These will be included in the node_announcement, publicly
+ /// tying these addresses together and to this node. If you wish to preserve user privacy,
+ /// addresses should likely contain only Tor Onion addresses.
///
- /// addresses represent the set (possibly empty) of socket addresses on which this node accepts
- /// incoming connections. These will be broadcast to the network, publicly tying these
- /// addresses together. If you wish to preserve user privacy, addresses should likely contain
- /// only Tor Onion addresses.
+ /// Panics if `addresses` is absurdly large (more than 500).
///
- /// Panics if addresses is absurdly large (more than 500).
+ /// [`get_and_clear_pending_msg_events`]: MessageSendEventsProvider::get_and_clear_pending_msg_events
pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], mut addresses: Vec<NetAddress>) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
excess_data: Vec::new(),
};
let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);
+ let node_announce_sig = self.secp_ctx.sign(&msghash, &self.our_network_key);
- let mut channel_state = self.channel_state.lock().unwrap();
- channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastNodeAnnouncement {
- msg: msgs::NodeAnnouncement {
- signature: self.secp_ctx.sign(&msghash, &self.our_network_key),
- contents: announcement
- },
- });
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
+
+ let mut announced_chans = false;
+ for (_, chan) in channel_state.by_id.iter() {
+ if let Some(msg) = chan.get_signed_channel_announcement(&self.our_network_key, self.get_our_node_id(), self.genesis_hash.clone()) {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
+ msg,
+ update_msg: match self.get_channel_update(chan) {
+ Ok(msg) => msg,
+ Err(_) => continue,
+ },
+ });
+ announced_chans = true;
+ } else {
+ // If the channel is not public or has not yet reached funding_locked, check the
+ // next channel. If we don't yet have any public channels, we'll skip the broadcast
+ // below as peers may not accept it without channels on chain first.
+ }
+ }
+
+ if announced_chans {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastNodeAnnouncement {
+ msg: msgs::NodeAnnouncement {
+ signature: node_announce_sig,
+ contents: announcement
+ },
+ });
+ }
}
/// Processes HTLCs which are pending waiting on random forward delay.
}
#[cfg(any(test, feature = "_test_utils"))]
- pub(crate) fn test_process_background_events(&self) {
+	/// Processes background events; exposed publicly for functional testing only.
+ pub fn test_process_background_events(&self) {
self.process_background_events();
}
return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
}
- let our_node_id = self.get_our_node_id();
- let (announcement, our_bitcoin_sig) =
- try_chan_entry!(self, chan.get_mut().get_channel_announcement(our_node_id.clone(), self.genesis_hash.clone()), channel_state, chan);
-
- let were_node_one = announcement.node_id_1 == our_node_id;
- let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);
- {
- let their_node_key = if were_node_one { &announcement.node_id_2 } else { &announcement.node_id_1 };
- let their_bitcoin_key = if were_node_one { &announcement.bitcoin_key_2 } else { &announcement.bitcoin_key_1 };
- match (self.secp_ctx.verify(&msghash, &msg.node_signature, their_node_key),
- self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, their_bitcoin_key)) {
- (Err(e), _) => {
- let chan_err: ChannelError = ChannelError::Close(format!("Bad announcement_signatures. Failed to verify node_signature: {:?}. Maybe using different node_secret for transport and routing msg? UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}", e, &announcement, their_node_key));
- try_chan_entry!(self, Err(chan_err), channel_state, chan);
- },
- (_, Err(e)) => {
- let chan_err: ChannelError = ChannelError::Close(format!("Bad announcement_signatures. Failed to verify bitcoin_signature: {:?}. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})", e, &announcement, their_bitcoin_key));
- try_chan_entry!(self, Err(chan_err), channel_state, chan);
- },
- _ => {}
- }
- }
-
- let our_node_sig = self.secp_ctx.sign(&msghash, &self.our_network_key);
-
channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
- msg: msgs::ChannelAnnouncement {
- node_signature_1: if were_node_one { our_node_sig } else { msg.node_signature },
- node_signature_2: if were_node_one { msg.node_signature } else { our_node_sig },
- bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { msg.bitcoin_signature },
- bitcoin_signature_2: if were_node_one { msg.bitcoin_signature } else { our_bitcoin_sig },
- contents: announcement,
- },
+ msg: try_chan_entry!(self, chan.get_mut().announcement_signatures(&self.our_network_key, self.get_our_node_id(), self.genesis_hash.clone(), msg), channel_state, chan),
update_msg: self.get_channel_update(chan.get()).unwrap(), // can only fail if we're not in a ready state
});
},
}
}
- /// Process pending events from the `chain::Watch`.
- fn process_pending_monitor_events(&self) {
+ /// Process pending events from the `chain::Watch`, returning whether any events were processed.
+ fn process_pending_monitor_events(&self) -> bool {
let mut failed_channels = Vec::new();
- {
- for monitor_event in self.chain_monitor.release_pending_monitor_events() {
- match monitor_event {
- MonitorEvent::HTLCEvent(htlc_update) => {
- if let Some(preimage) = htlc_update.payment_preimage {
- log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
- self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
- } else {
- log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+ let pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
+ let has_pending_monitor_events = !pending_monitor_events.is_empty();
+ for monitor_event in pending_monitor_events {
+ match monitor_event {
+ MonitorEvent::HTLCEvent(htlc_update) => {
+ if let Some(preimage) = htlc_update.payment_preimage {
+ log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
+ self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
+ } else {
+ log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+ }
+ },
+ MonitorEvent::CommitmentTxBroadcasted(funding_outpoint) => {
+ let mut channel_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_lock;
+ let by_id = &mut channel_state.by_id;
+ let short_to_id = &mut channel_state.short_to_id;
+ let pending_msg_events = &mut channel_state.pending_msg_events;
+ if let Some(mut chan) = by_id.remove(&funding_outpoint.to_channel_id()) {
+ if let Some(short_id) = chan.get_short_channel_id() {
+ short_to_id.remove(&short_id);
}
- },
- MonitorEvent::CommitmentTxBroadcasted(funding_outpoint) => {
- let mut channel_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_lock;
- let by_id = &mut channel_state.by_id;
- let short_to_id = &mut channel_state.short_to_id;
- let pending_msg_events = &mut channel_state.pending_msg_events;
- if let Some(mut chan) = by_id.remove(&funding_outpoint.to_channel_id()) {
- if let Some(short_id) = chan.get_short_channel_id() {
- short_to_id.remove(&short_id);
- }
- failed_channels.push(chan.force_shutdown(false));
- if let Ok(update) = self.get_channel_update(&chan) {
- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
- }
- pending_msg_events.push(events::MessageSendEvent::HandleError {
- node_id: chan.get_counterparty_node_id(),
- action: msgs::ErrorAction::SendErrorMessage {
- msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
- },
+ failed_channels.push(chan.force_shutdown(false));
+ if let Ok(update) = self.get_channel_update(&chan) {
+ pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
});
}
- },
- }
+ pending_msg_events.push(events::MessageSendEvent::HandleError {
+ node_id: chan.get_counterparty_node_id(),
+ action: msgs::ErrorAction::SendErrorMessage {
+ msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
+ },
+ });
+ }
+ },
}
}
for failure in failed_channels.drain(..) {
self.finish_force_close_channel(failure);
}
+
+ has_pending_monitor_events
}
/// Check the holding cell in each channel and free any pending HTLCs in them if possible.
+	/// Returns whether any updates occurred, such as pending HTLCs being freed or a monitor
+	/// update being applied.
+ ///
/// This should only apply to HTLCs which were added to the holding cell because we were
/// waiting on a monitor update to finish. In that case, we don't want to free the holding cell
/// directly in `channel_monitor_updated` as it may introduce deadlocks calling back into user
/// code to inform them of a channel monitor update.
- fn check_free_holding_cells(&self) {
+ fn check_free_holding_cells(&self) -> bool {
+ let mut has_monitor_update = false;
let mut failed_htlcs = Vec::new();
let mut handle_errors = Vec::new();
{
by_id.retain(|channel_id, chan| {
match chan.maybe_free_holding_cell_htlcs(&self.logger) {
- Ok((None, ref htlcs)) if htlcs.is_empty() => true,
Ok((commitment_opt, holding_cell_failed_htlcs)) => {
- failed_htlcs.push((holding_cell_failed_htlcs, *channel_id));
+ if !holding_cell_failed_htlcs.is_empty() {
+ failed_htlcs.push((holding_cell_failed_htlcs, *channel_id));
+ }
if let Some((commitment_update, monitor_update)) = commitment_opt {
if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
+ has_monitor_update = true;
let (res, close_channel) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, false, true, Vec::new(), Vec::new(), channel_id);
handle_errors.push((chan.get_counterparty_node_id(), res));
if close_channel { return false; }
}
});
}
+
+ let has_update = has_monitor_update || !failed_htlcs.is_empty();
for (failures, channel_id) in failed_htlcs.drain(..) {
self.fail_holding_cell_htlcs(failures, channel_id);
}
for (counterparty_node_id, err) in handle_errors.drain(..) {
let _ = handle_error!(self, err, counterparty_node_id);
}
+
+ has_update
}
/// Handle a list of channel failures during a block_connected or block_disconnected call,
pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32, user_payment_id: u64) -> Result<PaymentSecret, APIError> {
self.set_payment_hash_secret_map(payment_hash, None, min_value_msat, invoice_expiry_delta_secs, user_payment_id)
}
+
+ #[cfg(any(test, feature = "fuzztarget", feature = "_test_utils"))]
+ pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
+ let events = core::cell::RefCell::new(Vec::new());
+ let event_handler = |event| events.borrow_mut().push(event);
+ self.process_pending_events(&event_handler);
+ events.into_inner()
+ }
}
impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<Signer, M, T, K, F, L>
L::Target: Logger,
{
fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
- //TODO: This behavior should be documented. It's non-intuitive that we query
- // ChannelMonitors when clearing other events.
- self.process_pending_monitor_events();
+ let events = RefCell::new(Vec::new());
+ PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+ let mut result = NotifyOption::SkipPersist;
- self.check_free_holding_cells();
+ // TODO: This behavior should be documented. It's unintuitive that we query
+ // ChannelMonitors when clearing other events.
+ if self.process_pending_monitor_events() {
+ result = NotifyOption::DoPersist;
+ }
- let mut ret = Vec::new();
- let mut channel_state = self.channel_state.lock().unwrap();
- mem::swap(&mut ret, &mut channel_state.pending_msg_events);
- ret
+ if self.check_free_holding_cells() {
+ result = NotifyOption::DoPersist;
+ }
+
+ let mut pending_events = Vec::new();
+ let mut channel_state = self.channel_state.lock().unwrap();
+ mem::swap(&mut pending_events, &mut channel_state.pending_msg_events);
+
+ if !pending_events.is_empty() {
+ events.replace(pending_events);
+ }
+
+ result
+ });
+ events.into_inner()
}
}
impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> EventsProvider for ChannelManager<Signer, M, T, K, F, L>
- where M::Target: chain::Watch<Signer>,
- T::Target: BroadcasterInterface,
- K::Target: KeysInterface<Signer = Signer>,
- F::Target: FeeEstimator,
- L::Target: Logger,
+where
+ M::Target: chain::Watch<Signer>,
+ T::Target: BroadcasterInterface,
+ K::Target: KeysInterface<Signer = Signer>,
+ F::Target: FeeEstimator,
+ L::Target: Logger,
{
- fn get_and_clear_pending_events(&self) -> Vec<Event> {
- //TODO: This behavior should be documented. It's non-intuitive that we query
- // ChannelMonitors when clearing other events.
- self.process_pending_monitor_events();
+ /// Processes events that must be periodically handled.
+ ///
+ /// An [`EventHandler`] may safely call back to the provider in order to handle an event.
+ /// However, it must not call [`Writeable::write`] as doing so would result in a deadlock.
+ ///
+ /// Pending events are persisted as part of [`ChannelManager`]. While these events are cleared
+ /// when processed, an [`EventHandler`] must be able to handle previously seen events when
+ /// restarting from an old state.
+ fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
+ PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+ let mut result = NotifyOption::SkipPersist;
- let mut ret = Vec::new();
- let mut pending_events = self.pending_events.lock().unwrap();
- mem::swap(&mut ret, &mut *pending_events);
- ret
+ // TODO: This behavior should be documented. It's unintuitive that we query
+ // ChannelMonitors when clearing other events.
+ if self.process_pending_monitor_events() {
+ result = NotifyOption::DoPersist;
+ }
+
+ let mut pending_events = std::mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]);
+ if !pending_events.is_empty() {
+ result = NotifyOption::DoPersist;
+ }
+
+ for event in pending_events.drain(..) {
+ handler.handle_event(event);
+ }
+
+ result
+ });
}
}
const SERIALIZATION_VERSION: u8 = 1;
const MIN_SERIALIZATION_VERSION: u8 = 1;
-impl Writeable for PendingHTLCInfo {
+impl Writeable for PendingHTLCRouting {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
- match &self.routing {
+ match &self {
&PendingHTLCRouting::Forward { ref onion_packet, ref short_channel_id } => {
0u8.write(writer)?;
onion_packet.write(writer)?;
incoming_cltv_expiry.write(writer)?;
},
}
- self.incoming_shared_secret.write(writer)?;
- self.payment_hash.write(writer)?;
- self.amt_to_forward.write(writer)?;
- self.outgoing_cltv_value.write(writer)?;
Ok(())
}
}
-impl Readable for PendingHTLCInfo {
- fn read<R: ::std::io::Read>(reader: &mut R) -> Result<PendingHTLCInfo, DecodeError> {
- Ok(PendingHTLCInfo {
- routing: match Readable::read(reader)? {
- 0u8 => PendingHTLCRouting::Forward {
- onion_packet: Readable::read(reader)?,
- short_channel_id: Readable::read(reader)?,
- },
- 1u8 => PendingHTLCRouting::Receive {
- payment_data: msgs::FinalOnionHopData {
- payment_secret: Readable::read(reader)?,
- total_msat: Readable::read(reader)?,
- },
- incoming_cltv_expiry: Readable::read(reader)?,
+impl Readable for PendingHTLCRouting {
+ fn read<R: ::std::io::Read>(reader: &mut R) -> Result<PendingHTLCRouting, DecodeError> {
+ match Readable::read(reader)? {
+ 0u8 => Ok(PendingHTLCRouting::Forward {
+ onion_packet: Readable::read(reader)?,
+ short_channel_id: Readable::read(reader)?,
+ }),
+ 1u8 => Ok(PendingHTLCRouting::Receive {
+ payment_data: msgs::FinalOnionHopData {
+ payment_secret: Readable::read(reader)?,
+ total_msat: Readable::read(reader)?,
},
- _ => return Err(DecodeError::InvalidValue),
- },
- incoming_shared_secret: Readable::read(reader)?,
- payment_hash: Readable::read(reader)?,
- amt_to_forward: Readable::read(reader)?,
- outgoing_cltv_value: Readable::read(reader)?,
- })
+ incoming_cltv_expiry: Readable::read(reader)?,
+ }),
+ _ => Err(DecodeError::InvalidValue),
+ }
}
}
+impl_writeable_tlv_based!(PendingHTLCInfo, {
+ (0, routing),
+ (2, incoming_shared_secret),
+ (4, payment_hash),
+ (6, amt_to_forward),
+ (8, outgoing_cltv_value)
+}, {}, {});
+
impl Writeable for HTLCFailureMsg {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
match self {
}
}
-impl_writeable!(HTLCPreviousHopData, 0, {
- short_channel_id,
- outpoint,
- htlc_id,
- incoming_packet_shared_secret
-});
+impl_writeable_tlv_based!(HTLCPreviousHopData, {
+ (0, short_channel_id),
+ (2, outpoint),
+ (4, htlc_id),
+ (6, incoming_packet_shared_secret)
+}, {}, {});
impl Writeable for ClaimableHTLC {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
- self.prev_hop.write(writer)?;
- self.value.write(writer)?;
- self.payment_data.payment_secret.write(writer)?;
- self.payment_data.total_msat.write(writer)?;
- self.cltv_expiry.write(writer)
+ write_tlv_fields!(writer, {
+ (0, self.prev_hop),
+ (2, self.value),
+ (4, self.payment_data.payment_secret),
+ (6, self.payment_data.total_msat),
+ (8, self.cltv_expiry)
+ }, {});
+ Ok(())
}
}
impl Readable for ClaimableHTLC {
fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+ let mut prev_hop = HTLCPreviousHopData {
+ short_channel_id: 0, htlc_id: 0,
+ incoming_packet_shared_secret: [0; 32],
+ outpoint: OutPoint::null(),
+ };
+ let mut value = 0;
+ let mut payment_secret = PaymentSecret([0; 32]);
+ let mut total_msat = 0;
+ let mut cltv_expiry = 0;
+ read_tlv_fields!(reader, {
+ (0, prev_hop),
+ (2, value),
+ (4, payment_secret),
+ (6, total_msat),
+ (8, cltv_expiry)
+ }, {});
Ok(ClaimableHTLC {
- prev_hop: Readable::read(reader)?,
- value: Readable::read(reader)?,
+ prev_hop,
+ value,
payment_data: msgs::FinalOnionHopData {
- payment_secret: Readable::read(reader)?,
- total_msat: Readable::read(reader)?,
+ payment_secret,
+ total_msat,
},
- cltv_expiry: Readable::read(reader)?,
+ cltv_expiry,
})
}
}
}
}
-impl_writeable!(PendingInboundPayment, 0, {
- payment_secret,
- expiry_time,
- user_payment_id,
- payment_preimage,
- min_value_msat
-});
+impl_writeable_tlv_based!(PendingInboundPayment, {
+ (0, payment_secret),
+ (2, expiry_time),
+ (4, user_payment_id),
+ (6, payment_preimage),
+ (8, min_value_msat),
+}, {}, {});
impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable for ChannelManager<Signer, M, T, K, F, L>
where M::Target: chain::Watch<Signer>,
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
let _consistency_lock = self.total_consistency_lock.write().unwrap();
- writer.write_all(&[SERIALIZATION_VERSION; 1])?;
- writer.write_all(&[MIN_SERIALIZATION_VERSION; 1])?;
+ write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
self.genesis_hash.write(writer)?;
{
session_priv.write(writer)?;
}
+ write_tlv_fields!(writer, {}, {});
+
Ok(())
}
}
L::Target: Logger,
{
fn read<R: ::std::io::Read>(reader: &mut R, mut args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
- let _ver: u8 = Readable::read(reader)?;
- let min_ver: u8 = Readable::read(reader)?;
- if min_ver > SERIALIZATION_VERSION {
- return Err(DecodeError::UnknownVersion);
- }
+ let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
let genesis_hash: BlockHash = Readable::read(reader)?;
let best_block_height: u32 = Readable::read(reader)?;
}
}
+ read_tlv_fields!(reader, {}, {});
+
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&args.keys_manager.get_secure_random_bytes());
use routing::router::get_route;
use util::test_utils;
use util::config::UserConfig;
- use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
+ use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::{Block, BlockHeader, Transaction, TxOut};
- use std::sync::Mutex;
+ use std::sync::{Arc, Mutex};
use test::Bencher;
let network = bitcoin::Network::Testnet;
let genesis_hash = bitcoin::blockdata::constants::genesis_block(network).header.block_hash();
- let tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())};
+ let tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))};
let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
let mut config: UserConfig = Default::default();