use chain::chaininterface::{BroadcasterInterface,ChainListener,ChainWatchInterface,FeeEstimator};
use chain::transaction::OutPoint;
use ln::channel::{Channel, ChannelError};
-use ln::channelmonitor::{ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS};
+use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS};
use ln::router::{Route,RouteHop};
use ln::msgs;
-use ln::msgs::{ChannelMessageHandler, HandleError};
+use ln::msgs::{ChannelMessageHandler, DecodeError, HandleError};
use chain::keysinterface::KeysInterface;
use util::{byte_utils, events, internal_traits, rng};
use util::sha2::Sha256;
-use util::ser::{Readable, Writeable};
+use util::ser::{Readable, ReadableArgs, Writeable, Writer};
use util::chacha20poly1305rfc::ChaCha20;
use util::logger::Logger;
use util::errors::APIError;
use crypto::digest::Digest;
use crypto::symmetriccipher::SynchronousStreamCipher;
-use std::{ptr, mem};
-use std::collections::HashMap;
-use std::collections::hash_map;
+use std::{cmp, ptr, mem};
+use std::collections::{HashMap, hash_map, HashSet};
use std::io::Cursor;
-use std::sync::{Mutex,MutexGuard,Arc};
+use std::sync::{Arc, Mutex, MutexGuard, RwLock};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::{Instant,Duration};
///
/// Implements ChannelMessageHandler, handling the multi-channel parts and passing things through
/// to individual Channels.
+///
+/// Implements Writeable to write out all channel state to disk. Implies peer_disconnected() for
+/// all peers during write/read (though does not modify this instance, only the instance being
+/// serialized). This will result in any channels which have not yet exchanged funding_created (ie
+/// called funding_transaction_generated for outbound channels) being closed.
+///
+/// Note that you can be a bit lazier about writing out ChannelManager than you can be with
+/// ChannelMonitors. With ChannelMonitors you MUST write each monitor update out to disk before
+/// returning from ManyChannelMonitor::add_update_monitor, with ChannelManagers, writing updates
+/// happens out-of-band (and will prevent any other ChannelManager operations from occurring during
+/// the serialization process). If the deserialized version is out-of-date compared to the
+/// ChannelMonitors passed by reference to read(), those channels will be force-closed based on the
+/// ChannelMonitor state and no funds will be lost (mod on-chain transaction fees).
+///
+/// Note that the deserializer is only implemented for (Sha256dHash, ChannelManager), which
+/// tells you the last block hash which was block_connect()ed. You MUST rescan any blocks along
+/// the "reorg path" (ie call block_disconnected() until you get to a common block and then call
+/// block_connected() to step towards your best block) upon deserialization before using the
+/// object!
pub struct ChannelManager {
genesis_hash: Sha256dHash,
fee_estimator: Arc<FeeEstimator>,
announce_channels_publicly: bool,
fee_proportional_millionths: u32,
latest_block_height: AtomicUsize,
+ last_block_hash: Mutex<Sha256dHash>,
secp_ctx: Secp256k1<secp256k1::All>,
channel_state: Mutex<ChannelHolder>,
our_network_key: SecretKey,
pending_events: Mutex<Vec<events::Event>>,
+ /// Used when we have to take a BIG lock to make sure everything is self-consistent.
+ /// Essentially just when we're serializing ourselves out.
+ /// Taken first everywhere where we are making changes before any other locks.
+ total_consistency_lock: RwLock<()>,
keys_manager: Arc<KeysInterface>,
announce_channels_publicly,
fee_proportional_millionths,
- latest_block_height: AtomicUsize::new(0), //TODO: Get an init value (generally need to replay recent chain on chain_monitor registration)
+ latest_block_height: AtomicUsize::new(0), //TODO: Get an init value
+ last_block_hash: Mutex::new(Default::default()),
secp_ctx,
channel_state: Mutex::new(ChannelHolder{
our_network_key: keys_manager.get_node_secret(),
pending_events: Mutex::new(Vec::new()),
+ total_consistency_lock: RwLock::new(()),
keys_manager,
pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64) -> Result<(), APIError> {
let channel = Channel::new_outbound(&*self.fee_estimator, &self.keys_manager, their_network_key, channel_value_satoshis, push_msat, self.announce_channels_publicly, user_id, Arc::clone(&self.logger))?;
let res = channel.get_open_channel(self.genesis_hash.clone(), &*self.fee_estimator);
+
+ let _ = self.total_consistency_lock.read().unwrap();
let mut channel_state = self.channel_state.lock().unwrap();
match channel_state.by_id.entry(channel.channel_id()) {
hash_map::Entry::Occupied(_) => {
///
/// May generate a SendShutdown message event on success, which should be relayed.
pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
+ let _ = self.total_consistency_lock.read().unwrap();
+
let (mut failed_htlcs, chan_option) = {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = channel_state_lock.borrow_parts();
/// Force closes a channel, immediately broadcasting the latest local commitment transaction to
/// the chain and rejecting new HTLCs on the given channel.
pub fn force_close_channel(&self, channel_id: &[u8; 32]) {
+ let _ = self.total_consistency_lock.read().unwrap();
+
let mut chan = {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = channel_state_lock.borrow_parts();
let (onion_payloads, htlc_msat, htlc_cltv) = ChannelManager::build_onion_payloads(&route, cur_height)?;
let onion_packet = ChannelManager::construct_onion_packet(onion_payloads, onion_keys, &payment_hash);
+ let _ = self.total_consistency_lock.read().unwrap();
let mut channel_state = self.channel_state.lock().unwrap();
let id = match channel_state.short_to_id.get(&route.hops.first().unwrap().short_channel_id) {
/// May panic if the funding_txo is duplicative with some other channel (note that this should
/// be trivially prevented by using unique funding transaction keys per-channel).
pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_txo: OutPoint) {
+ let _ = self.total_consistency_lock.read().unwrap();
+
let (chan, msg, chan_monitor) = {
let mut channel_state = self.channel_state.lock().unwrap();
match channel_state.by_id.remove(temporary_channel_id) {
/// Should only really ever be called in response to an PendingHTLCsForwardable event.
/// Will likely generate further events.
pub fn process_pending_htlc_forwards(&self) {
+ let _ = self.total_consistency_lock.read().unwrap();
+
let mut new_events = Vec::new();
let mut failed_forwards = Vec::new();
{
/// Indicates that the preimage for payment_hash is unknown or the received amount is incorrect after a PaymentReceived event.
pub fn fail_htlc_backwards(&self, payment_hash: &[u8; 32], reason: PaymentFailReason) -> bool {
+ let _ = self.total_consistency_lock.read().unwrap();
+
let mut channel_state = Some(self.channel_state.lock().unwrap());
let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash);
if let Some(mut sources) = removed_source {
let mut payment_hash = [0; 32];
sha.result(&mut payment_hash);
+ let _ = self.total_consistency_lock.read().unwrap();
+
let mut channel_state = Some(self.channel_state.lock().unwrap());
let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&payment_hash);
if let Some(mut sources) = removed_source {
let mut close_results = Vec::new();
let mut htlc_forwards = Vec::new();
let mut htlc_failures = Vec::new();
+ let _ = self.total_consistency_lock.read().unwrap();
{
let mut channel_lock = self.channel_state.lock().unwrap();
/// Note: This API is likely to change!
#[doc(hidden)]
pub fn update_fee(&self, channel_id: [u8;32], feerate_per_kw: u64) -> Result<(), APIError> {
+ let _ = self.total_consistency_lock.read().unwrap();
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = channel_state_lock.borrow_parts();
impl ChainListener for ChannelManager {
fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], indexes_of_txn_matched: &[u32]) {
+ let _ = self.total_consistency_lock.read().unwrap();
let mut failed_channels = Vec::new();
{
let mut channel_lock = self.channel_state.lock().unwrap();
self.finish_force_close_channel(failure);
}
self.latest_block_height.store(height as usize, Ordering::Release);
+ *self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header.bitcoin_hash();
}
/// We force-close the channel without letting our counterparty participate in the shutdown
fn block_disconnected(&self, header: &BlockHeader) {
+ let _ = self.total_consistency_lock.read().unwrap();
let mut failed_channels = Vec::new();
{
let mut channel_lock = self.channel_state.lock().unwrap();
self.finish_force_close_channel(failure);
}
self.latest_block_height.fetch_sub(1, Ordering::AcqRel);
+ *self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header.bitcoin_hash();
}
}
impl ChannelMessageHandler for ChannelManager {
//TODO: Handle errors and close channel (or so)
fn handle_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), HandleError> {
+ let _ = self.total_consistency_lock.read().unwrap();
handle_error!(self, self.internal_open_channel(their_node_id, msg), their_node_id)
}
fn handle_accept_channel(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), HandleError> {
+ let _ = self.total_consistency_lock.read().unwrap();
handle_error!(self, self.internal_accept_channel(their_node_id, msg), their_node_id)
}
fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), HandleError> {
+ let _ = self.total_consistency_lock.read().unwrap();
handle_error!(self, self.internal_funding_created(their_node_id, msg), their_node_id)
}
fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), HandleError> {
+ let _ = self.total_consistency_lock.read().unwrap();
handle_error!(self, self.internal_funding_signed(their_node_id, msg), their_node_id)
}
fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<(), HandleError> {
+ let _ = self.total_consistency_lock.read().unwrap();
handle_error!(self, self.internal_funding_locked(their_node_id, msg), their_node_id)
}
fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), HandleError> {
+ let _ = self.total_consistency_lock.read().unwrap();
handle_error!(self, self.internal_shutdown(their_node_id, msg), their_node_id)
}
fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), HandleError> {
+ let _ = self.total_consistency_lock.read().unwrap();
handle_error!(self, self.internal_closing_signed(their_node_id, msg), their_node_id)
}
fn handle_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), msgs::HandleError> {
+ let _ = self.total_consistency_lock.read().unwrap();
handle_error!(self, self.internal_update_add_htlc(their_node_id, msg), their_node_id)
}
fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), HandleError> {
+ let _ = self.total_consistency_lock.read().unwrap();
handle_error!(self, self.internal_update_fulfill_htlc(their_node_id, msg), their_node_id)
}
fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), HandleError> {
+ let _ = self.total_consistency_lock.read().unwrap();
handle_error!(self, self.internal_update_fail_htlc(their_node_id, msg), their_node_id)
}
fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), HandleError> {
+ let _ = self.total_consistency_lock.read().unwrap();
handle_error!(self, self.internal_update_fail_malformed_htlc(their_node_id, msg), their_node_id)
}
fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), HandleError> {
+ let _ = self.total_consistency_lock.read().unwrap();
handle_error!(self, self.internal_commitment_signed(their_node_id, msg), their_node_id)
}
fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), HandleError> {
+ let _ = self.total_consistency_lock.read().unwrap();
handle_error!(self, self.internal_revoke_and_ack(their_node_id, msg), their_node_id)
}
fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), HandleError> {
+ let _ = self.total_consistency_lock.read().unwrap();
handle_error!(self, self.internal_update_fee(their_node_id, msg), their_node_id)
}
fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), HandleError> {
+ let _ = self.total_consistency_lock.read().unwrap();
handle_error!(self, self.internal_announcement_signatures(their_node_id, msg), their_node_id)
}
fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), HandleError> {
+ let _ = self.total_consistency_lock.read().unwrap();
handle_error!(self, self.internal_channel_reestablish(their_node_id, msg), their_node_id)
}
fn peer_disconnected(&self, their_node_id: &PublicKey, no_connection_possible: bool) {
+ let _ = self.total_consistency_lock.read().unwrap();
let mut failed_channels = Vec::new();
let mut failed_payments = Vec::new();
{
}
fn peer_connected(&self, their_node_id: &PublicKey) {
+ let _ = self.total_consistency_lock.read().unwrap();
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = channel_state_lock.borrow_parts();
let pending_msg_events = channel_state.pending_msg_events;
}
fn handle_error(&self, their_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
+ let _ = self.total_consistency_lock.read().unwrap();
+
if msg.channel_id == [0; 32] {
for chan in self.list_channels() {
if chan.remote_network_id == *their_node_id {
}
}
+const SERIALIZATION_VERSION: u8 = 1;
+const MIN_SERIALIZATION_VERSION: u8 = 1;
+
+// Serialization for PendingForwardHTLCInfo: a leading u8 flags whether the
+// optional onion_packet is present (1) or absent (0), then the remaining
+// fields are written in declaration order. Must mirror the Readable impl.
+impl Writeable for PendingForwardHTLCInfo {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ // Encode the Option as an explicit presence byte.
+ if let &Some(ref onion) = &self.onion_packet {
+ 1u8.write(writer)?;
+ onion.write(writer)?;
+ } else {
+ 0u8.write(writer)?;
+ }
+ self.incoming_shared_secret.write(writer)?;
+ self.payment_hash.write(writer)?;
+ self.short_channel_id.write(writer)?;
+ self.amt_to_forward.write(writer)?;
+ self.outgoing_cltv_value.write(writer)?;
+ Ok(())
+ }
+}
+
+// Deserialization counterpart of Writeable for PendingForwardHTLCInfo: reads
+// the presence byte for onion_packet, then the fields in the same order they
+// were written. Any presence byte other than 0/1 is rejected.
+impl<R: ::std::io::Read> Readable<R> for PendingForwardHTLCInfo {
+ fn read(reader: &mut R) -> Result<PendingForwardHTLCInfo, DecodeError> {
+ let onion_packet = match <u8 as Readable<R>>::read(reader)? {
+ 0 => None,
+ 1 => Some(msgs::OnionPacket::read(reader)?),
+ _ => return Err(DecodeError::InvalidValue),
+ };
+ Ok(PendingForwardHTLCInfo {
+ onion_packet,
+ incoming_shared_secret: Readable::read(reader)?,
+ payment_hash: Readable::read(reader)?,
+ short_channel_id: Readable::read(reader)?,
+ amt_to_forward: Readable::read(reader)?,
+ outgoing_cltv_value: Readable::read(reader)?,
+ })
+ }
+}
+
+// Serialization for HTLCFailureMsg: a u8 variant tag (0 = Relay,
+// 1 = Malformed) followed by the inner failure message.
+impl Writeable for HTLCFailureMsg {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ match self {
+ &HTLCFailureMsg::Relay(ref fail_msg) => {
+ 0u8.write(writer)?;
+ fail_msg.write(writer)?;
+ },
+ &HTLCFailureMsg::Malformed(ref fail_msg) => {
+ 1u8.write(writer)?;
+ fail_msg.write(writer)?;
+ }
+ }
+ Ok(())
+ }
+}
+
+// Deserialization counterpart for HTLCFailureMsg; unknown variant tags are
+// rejected with DecodeError::InvalidValue.
+impl<R: ::std::io::Read> Readable<R> for HTLCFailureMsg {
+ fn read(reader: &mut R) -> Result<HTLCFailureMsg, DecodeError> {
+ match <u8 as Readable<R>>::read(reader)? {
+ 0 => Ok(HTLCFailureMsg::Relay(Readable::read(reader)?)),
+ 1 => Ok(HTLCFailureMsg::Malformed(Readable::read(reader)?)),
+ _ => Err(DecodeError::InvalidValue),
+ }
+ }
+}
+
+// Serialization for PendingHTLCStatus: a u8 variant tag (0 = Forward,
+// 1 = Fail) followed by the variant payload.
+impl Writeable for PendingHTLCStatus {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ match self {
+ &PendingHTLCStatus::Forward(ref forward_info) => {
+ 0u8.write(writer)?;
+ forward_info.write(writer)?;
+ },
+ &PendingHTLCStatus::Fail(ref fail_msg) => {
+ 1u8.write(writer)?;
+ fail_msg.write(writer)?;
+ }
+ }
+ Ok(())
+ }
+}
+
+// Deserialization counterpart for PendingHTLCStatus; unknown variant tags are
+// rejected with DecodeError::InvalidValue.
+impl<R: ::std::io::Read> Readable<R> for PendingHTLCStatus {
+ fn read(reader: &mut R) -> Result<PendingHTLCStatus, DecodeError> {
+ match <u8 as Readable<R>>::read(reader)? {
+ 0 => Ok(PendingHTLCStatus::Forward(Readable::read(reader)?)),
+ 1 => Ok(PendingHTLCStatus::Fail(Readable::read(reader)?)),
+ _ => Err(DecodeError::InvalidValue),
+ }
+ }
+}
+
+// HTLCPreviousHopData round-trips via the impl_writeable! macro with its
+// fields in declaration order. (The second argument appears to be a length
+// hint — confirm against the impl_writeable! macro definition.)
+impl_writeable!(HTLCPreviousHopData, 0, {
+ short_channel_id,
+ htlc_id,
+ incoming_packet_shared_secret
+});
+
+// Serialization for HTLCSource: a u8 variant tag (0 = PreviousHopData,
+// 1 = OutboundRoute) followed by the variant's fields in declaration order.
+impl Writeable for HTLCSource {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ match self {
+ &HTLCSource::PreviousHopData(ref hop_data) => {
+ 0u8.write(writer)?;
+ hop_data.write(writer)?;
+ },
+ &HTLCSource::OutboundRoute { ref route, ref session_priv, ref first_hop_htlc_msat } => {
+ 1u8.write(writer)?;
+ route.write(writer)?;
+ session_priv.write(writer)?;
+ first_hop_htlc_msat.write(writer)?;
+ }
+ }
+ Ok(())
+ }
+}
+
+// Deserialization counterpart for HTLCSource; reads the variant tag and then
+// the fields in the same order they were written.
+impl<R: ::std::io::Read> Readable<R> for HTLCSource {
+ fn read(reader: &mut R) -> Result<HTLCSource, DecodeError> {
+ match <u8 as Readable<R>>::read(reader)? {
+ 0 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)),
+ 1 => Ok(HTLCSource::OutboundRoute {
+ route: Readable::read(reader)?,
+ session_priv: Readable::read(reader)?,
+ first_hop_htlc_msat: Readable::read(reader)?,
+ }),
+ _ => Err(DecodeError::InvalidValue),
+ }
+ }
+}
+
+// Serialization for HTLCFailReason: a u8 variant tag (0 = ErrorPacket,
+// 1 = Reason) followed by the variant's fields.
+impl Writeable for HTLCFailReason {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ match self {
+ &HTLCFailReason::ErrorPacket { ref err } => {
+ 0u8.write(writer)?;
+ err.write(writer)?;
+ },
+ &HTLCFailReason::Reason { ref failure_code, ref data } => {
+ 1u8.write(writer)?;
+ failure_code.write(writer)?;
+ data.write(writer)?;
+ }
+ }
+ Ok(())
+ }
+}
+
+// Deserialization counterpart for HTLCFailReason; unknown variant tags are
+// rejected with DecodeError::InvalidValue.
+impl<R: ::std::io::Read> Readable<R> for HTLCFailReason {
+ fn read(reader: &mut R) -> Result<HTLCFailReason, DecodeError> {
+ match <u8 as Readable<R>>::read(reader)? {
+ 0 => Ok(HTLCFailReason::ErrorPacket { err: Readable::read(reader)? }),
+ 1 => Ok(HTLCFailReason::Reason {
+ failure_code: Readable::read(reader)?,
+ data: Readable::read(reader)?,
+ }),
+ _ => Err(DecodeError::InvalidValue),
+ }
+ }
+}
+
+// HTLCForwardInfo round-trips via the impl_writeable! macro with its fields
+// in declaration order, same pattern as HTLCPreviousHopData above.
+impl_writeable!(HTLCForwardInfo, 0, {
+ prev_short_channel_id,
+ prev_htlc_id,
+ forward_info
+});
+
+// Serializes the entire ChannelManager state. Layout: version bytes, global
+// fields, then length-prefixed lists of funded channels, forward_htlcs and
+// claimable_htlcs. Channels which have not initiated funding are deliberately
+// skipped (see the struct-level docs on serialization semantics).
+impl Writeable for ChannelManager {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ // Take the consistency lock in write mode: blocks all other ChannelManager
+ // operations (which take it in read mode) so we snapshot a self-consistent state.
+ let _ = self.total_consistency_lock.write().unwrap();
+
+ // Version first so readers can detect incompatible formats before parsing.
+ writer.write_all(&[SERIALIZATION_VERSION; 1])?;
+ writer.write_all(&[MIN_SERIALIZATION_VERSION; 1])?;
+
+ self.genesis_hash.write(writer)?;
+ self.announce_channels_publicly.write(writer)?;
+ self.fee_proportional_millionths.write(writer)?;
+ (self.latest_block_height.load(Ordering::Acquire) as u32).write(writer)?;
+ self.last_block_hash.lock().unwrap().write(writer)?;
+
+ let channel_state = self.channel_state.lock().unwrap();
+ // First pass: count unfunded channels so the length prefix below only
+ // covers the channels we actually serialize in the second pass.
+ let mut unfunded_channels = 0;
+ for (_, channel) in channel_state.by_id.iter() {
+ if !channel.is_funding_initiated() {
+ unfunded_channels += 1;
+ }
+ }
+ ((channel_state.by_id.len() - unfunded_channels) as u64).write(writer)?;
+ for (_, channel) in channel_state.by_id.iter() {
+ if channel.is_funding_initiated() {
+ channel.write(writer)?;
+ }
+ }
+
+ // forward_htlcs: map of short_channel_id -> list of pending forwards.
+ (channel_state.forward_htlcs.len() as u64).write(writer)?;
+ for (short_channel_id, pending_forwards) in channel_state.forward_htlcs.iter() {
+ short_channel_id.write(writer)?;
+ (pending_forwards.len() as u64).write(writer)?;
+ for forward in pending_forwards {
+ forward.write(writer)?;
+ }
+ }
+
+ // claimable_htlcs: map of payment_hash -> list of previous-hop sources.
+ (channel_state.claimable_htlcs.len() as u64).write(writer)?;
+ for (payment_hash, previous_hops) in channel_state.claimable_htlcs.iter() {
+ payment_hash.write(writer)?;
+ (previous_hops.len() as u64).write(writer)?;
+ for previous_hop in previous_hops {
+ previous_hop.write(writer)?;
+ }
+ }
+
+ Ok(())
+ }
+}
+
+/// Arguments for the creation of a ChannelManager that are not deserialized.
+///
+/// At a high-level, the process for deserializing a ChannelManager and resuming normal operation
+/// is:
+/// 1) Deserialize all stored ChannelMonitors.
+/// 2) Deserialize the ChannelManager by filling in this struct and calling <(Sha256dHash,
+/// ChannelManager)>::read(reader, args).
+/// This may result in closing some Channels if the ChannelMonitor is newer than the stored
+/// ChannelManager state to ensure no loss of funds. Thus, transactions may be broadcasted.
+/// 3) Register all relevant ChannelMonitor outpoints with your chain watch mechanism using
+/// ChannelMonitor::get_monitored_outpoints and ChannelMonitor::get_funding_txo().
+/// 4) Reconnect blocks on your ChannelMonitors.
+/// 5) Move the ChannelMonitors into your local ManyChannelMonitor.
+/// 6) Disconnect/connect blocks on the ChannelManager.
+/// 7) Register the new ChannelManager with your ChainWatchInterface (this does not happen
+/// automatically as it does in ChannelManager::new()).
+pub struct ChannelManagerReadArgs<'a> {
+ /// The keys provider which will give us relevant keys. Some keys will be loaded during
+ /// deserialization.
+ pub keys_manager: Arc<KeysInterface>,
+
+ /// The fee_estimator for use in the ChannelManager in the future.
+ ///
+ /// No calls to the FeeEstimator will be made during deserialization.
+ pub fee_estimator: Arc<FeeEstimator>,
+ /// The ManyChannelMonitor for use in the ChannelManager in the future.
+ ///
+ /// No calls to the ManyChannelMonitor will be made during deserialization. It is assumed that
+ /// you have deserialized ChannelMonitors separately and will add them to your
+ /// ManyChannelMonitor after deserializing this ChannelManager.
+ pub monitor: Arc<ManyChannelMonitor>,
+ /// The ChainWatchInterface for use in the ChannelManager in the future.
+ ///
+ /// No calls to the ChainWatchInterface will be made during deserialization.
+ pub chain_monitor: Arc<ChainWatchInterface>,
+ /// The BroadcasterInterface which will be used in the ChannelManager in the future and may be
+ /// used to broadcast the latest local commitment transactions of channels which must be
+ /// force-closed during deserialization.
+ pub tx_broadcaster: Arc<BroadcasterInterface>,
+ /// The Logger for use in the ChannelManager and which may be used to log information during
+ /// deserialization.
+ pub logger: Arc<Logger>,
+
+
+ /// A map from channel funding outpoints to ChannelMonitors for those channels (ie
+ /// value.get_funding_txo() should be the key).
+ ///
+ /// The map and the monitors are only borrowed for the lifetime of this struct.
+ ///
+ /// If a monitor is inconsistent with the channel state during deserialization the channel will
+ /// be force-closed using the data in the channelmonitor and the Channel will be dropped. This
+ /// is true for missing channels as well. If there is a monitor missing for which we find
+ /// channel data Err(DecodeError::InvalidValue) will be returned.
+ ///
+ /// In such cases the latest local transactions will be sent to the tx_broadcaster included in
+ /// this struct.
+ pub channel_monitors: &'a HashMap<OutPoint, &'a ChannelMonitor>,
+}
+
+// Deserializes a ChannelManager written by the Writeable impl above, returning
+// the last block hash that was block_connected()ed alongside the manager (the
+// caller MUST rescan the reorg path from that hash — see the struct docs).
+// Channels inconsistent with or missing from args.channel_monitors are
+// force-closed rather than resumed, so no funds can be lost.
+impl<'a, R : ::std::io::Read> ReadableArgs<R, ChannelManagerReadArgs<'a>> for (Sha256dHash, ChannelManager) {
+ fn read(reader: &mut R, args: ChannelManagerReadArgs<'a>) -> Result<Self, DecodeError> {
+ // We ignore the writer's version but refuse to read anything whose
+ // minimum-required version is newer than what we implement.
+ let _ver: u8 = Readable::read(reader)?;
+ let min_ver: u8 = Readable::read(reader)?;
+ if min_ver > SERIALIZATION_VERSION {
+ return Err(DecodeError::UnknownVersion);
+ }
+
+ let genesis_hash: Sha256dHash = Readable::read(reader)?;
+ let announce_channels_publicly: bool = Readable::read(reader)?;
+ let fee_proportional_millionths: u32 = Readable::read(reader)?;
+ let latest_block_height: u32 = Readable::read(reader)?;
+ let last_block_hash: Sha256dHash = Readable::read(reader)?;
+
+ // Accumulates (txn-to-broadcast, failed-HTLC) results from channels we
+ // must force-close; processed at the end once the manager exists.
+ let mut closed_channels = Vec::new();
+
+ let channel_count: u64 = Readable::read(reader)?;
+ // Capacities are capped at 128 so a corrupt/malicious length prefix
+ // cannot trigger a huge up-front allocation.
+ let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
+ let mut by_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+ let mut short_to_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+ for _ in 0..channel_count {
+ let mut channel: Channel = ReadableArgs::read(reader, args.logger.clone())?;
+ // Every channel must have been written at the same chain tip as the
+ // manager itself; anything else means an inconsistent snapshot.
+ if channel.last_block_connected != last_block_hash {
+ return Err(DecodeError::InvalidValue);
+ }
+
+ let funding_txo = channel.channel_monitor().get_funding_txo().ok_or(DecodeError::InvalidValue)?;
+ funding_txo_set.insert(funding_txo.clone());
+ if let Some(monitor) = args.channel_monitors.get(&funding_txo) {
+ // If the provided monitor and the channel disagree on commitment
+ // state, the stored manager is stale: force-close using the
+ // monitor's (authoritative) latest local commitment transaction.
+ if channel.get_cur_local_commitment_transaction_number() != monitor.get_cur_local_commitment_number() ||
+ channel.get_revoked_remote_commitment_transaction_number() != monitor.get_min_seen_secret() ||
+ channel.get_cur_remote_commitment_transaction_number() != monitor.get_cur_remote_commitment_number() {
+ let mut force_close_res = channel.force_shutdown();
+ force_close_res.0 = monitor.get_latest_local_commitment_txn();
+ closed_channels.push(force_close_res);
+ } else {
+ if let Some(short_channel_id) = channel.get_short_channel_id() {
+ short_to_id.insert(short_channel_id, channel.channel_id());
+ }
+ by_id.insert(channel.channel_id(), channel);
+ }
+ } else {
+ // Channel data with no matching monitor: refuse to deserialize
+ // (see ChannelManagerReadArgs::channel_monitors docs).
+ return Err(DecodeError::InvalidValue);
+ }
+ }
+
+ // Monitors with no corresponding channel represent channels closed while
+ // we were offline (or dropped from the manager): broadcast their latest
+ // local commitment transactions.
+ for (ref funding_txo, ref monitor) in args.channel_monitors.iter() {
+ if !funding_txo_set.contains(funding_txo) {
+ closed_channels.push((monitor.get_latest_local_commitment_txn(), Vec::new()));
+ }
+ }
+
+ let forward_htlcs_count: u64 = Readable::read(reader)?;
+ let mut forward_htlcs = HashMap::with_capacity(cmp::min(forward_htlcs_count as usize, 128));
+ for _ in 0..forward_htlcs_count {
+ let short_channel_id = Readable::read(reader)?;
+ let pending_forwards_count: u64 = Readable::read(reader)?;
+ let mut pending_forwards = Vec::with_capacity(cmp::min(pending_forwards_count as usize, 128));
+ for _ in 0..pending_forwards_count {
+ pending_forwards.push(Readable::read(reader)?);
+ }
+ forward_htlcs.insert(short_channel_id, pending_forwards);
+ }
+
+ let claimable_htlcs_count: u64 = Readable::read(reader)?;
+ let mut claimable_htlcs = HashMap::with_capacity(cmp::min(claimable_htlcs_count as usize, 128));
+ for _ in 0..claimable_htlcs_count {
+ let payment_hash = Readable::read(reader)?;
+ let previous_hops_len: u64 = Readable::read(reader)?;
+ let mut previous_hops = Vec::with_capacity(cmp::min(previous_hops_len as usize, 2));
+ for _ in 0..previous_hops_len {
+ previous_hops.push(Readable::read(reader)?);
+ }
+ claimable_htlcs.insert(payment_hash, previous_hops);
+ }
+
+ // Reassemble the manager: serialized fields plus the runtime interfaces
+ // supplied via args. next_forward/pending events start fresh, matching
+ // the implied peer_disconnected() semantics of serialization.
+ let channel_manager = ChannelManager {
+ genesis_hash,
+ fee_estimator: args.fee_estimator,
+ monitor: args.monitor,
+ chain_monitor: args.chain_monitor,
+ tx_broadcaster: args.tx_broadcaster,
+
+ announce_channels_publicly,
+ fee_proportional_millionths,
+ latest_block_height: AtomicUsize::new(latest_block_height as usize),
+ last_block_hash: Mutex::new(last_block_hash),
+ secp_ctx: Secp256k1::new(),
+
+ channel_state: Mutex::new(ChannelHolder {
+ by_id,
+ short_to_id,
+ next_forward: Instant::now(),
+ forward_htlcs,
+ claimable_htlcs,
+ pending_msg_events: Vec::new(),
+ }),
+ our_network_key: args.keys_manager.get_node_secret(),
+
+ pending_events: Mutex::new(Vec::new()),
+ total_consistency_lock: RwLock::new(()),
+ keys_manager: args.keys_manager,
+ logger: args.logger,
+ };
+
+ // Now that the manager exists, finish force-closing everything queued
+ // above (broadcasts via tx_broadcaster, fails back HTLCs).
+ for close_res in closed_channels.drain(..) {
+ channel_manager.finish_force_close_channel(close_res);
+ //TODO: Broadcast channel update for closed channels, but only after we've made a
+ //connection or two.
+ }
+
+ Ok((last_block_hash.clone(), channel_manager))
+ }
+}
+
#[cfg(test)]
mod tests {
use chain::chaininterface;
use chain::transaction::OutPoint;
- use chain::chaininterface::ChainListener;
+ use chain::chaininterface::{ChainListener, ChainWatchInterface};
use chain::keysinterface::KeysInterface;
use chain::keysinterface;
- use ln::channelmanager::{ChannelManager,OnionKeys,PaymentFailReason,RAACommitmentOrder};
- use ln::channelmonitor::{ChannelMonitorUpdateErr, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS};
+ use ln::channelmanager::{ChannelManager,ChannelManagerReadArgs,OnionKeys,PaymentFailReason,RAACommitmentOrder};
+ use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS, ManyChannelMonitor};
use ln::router::{Route, RouteHop, Router};
use ln::msgs;
use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler};
use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
use util::errors::APIError;
use util::logger::Logger;
- use util::ser::Writeable;
+ use util::ser::{Writeable, Writer, ReadableArgs};
use bitcoin::util::hash::Sha256dHash;
use bitcoin::blockdata::block::{Block, BlockHeader};
chan_monitor: Arc<test_utils::TestChannelMonitor>,
node: Arc<ChannelManager>,
router: Router,
+ node_seed: [u8; 32],
network_payment_count: Rc<RefCell<u8>>,
network_chan_count: Rc<RefCell<u32>>,
}
let mut seed = [0; 32];
rng.fill_bytes(&mut seed);
let keys_manager = Arc::new(keysinterface::KeysManager::new(&seed, Network::Testnet, Arc::clone(&logger)));
- let chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(chain_monitor.clone(), tx_broadcaster.clone()));
+ let chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(chain_monitor.clone(), tx_broadcaster.clone(), logger.clone()));
let node = ChannelManager::new(0, true, Network::Testnet, feeest.clone(), chan_monitor.clone(), chain_monitor.clone(), tx_broadcaster.clone(), Arc::clone(&logger), keys_manager.clone()).unwrap();
let router = Router::new(PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret()), chain_monitor.clone(), Arc::clone(&logger));
- nodes.push(Node { chain_monitor, tx_broadcaster, chan_monitor, node, router,
+ nodes.push(Node { chain_monitor, tx_broadcaster, chan_monitor, node, router, node_seed: seed,
network_payment_count: payment_count.clone(),
network_chan_count: chan_count.clone(),
});
sign_msg!(unsigned_msg);
assert!(nodes[0].router.handle_channel_announcement(&chan_announcement).is_err());
}
+
+	struct VecWriter(Vec<u8>); // minimal in-memory Writer: captures write_for_disk output for round-trip tests
+	impl Writer for VecWriter {
+		fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
+			self.0.extend_from_slice(buf); // appending to a Vec cannot fail, so always Ok
+			Ok(())
+		}
+		fn size_hint(&mut self, size: usize) {
+			self.0.reserve_exact(size); // pre-allocate so serialization avoids repeated reallocations
+		}
+	}
+
+	#[test]
+	fn test_no_txn_manager_serialize_deserialize() {
+		let mut nodes = create_network(2); // channel opened below, but funding tx never confirmed before restart
+
+		let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001);
+
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+		let nodes_0_serialized = nodes[0].node.encode(); // snapshot nodes[0]'s ChannelManager state
+		let mut chan_0_monitor_serialized = VecWriter(Vec::new());
+		nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap(); // snapshot the (only) ChannelMonitor
+
+		nodes[0].chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new()))); // fresh monitor set, as a restarted node would have
+		let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
+		let (_, chan_0_monitor) = <(Sha256dHash, ChannelMonitor)>::read(&mut chan_0_monitor_read, Arc::new(test_utils::TestLogger::new())).unwrap();
+		assert!(chan_0_monitor_read.is_empty()); // deserialization consumed every byte
+
+		let mut nodes_0_read = &nodes_0_serialized[..];
+		let keys_manager = Arc::new(keysinterface::KeysManager::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new()))); // same seed => same node keys after "restart"
+		let (_, nodes_0_deserialized) = {
+			let mut channel_monitors = HashMap::new();
+			channel_monitors.insert(chan_0_monitor.get_funding_txo().unwrap(), &chan_0_monitor);
+			<(Sha256dHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+				keys_manager,
+				fee_estimator: Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 }),
+				monitor: nodes[0].chan_monitor.clone(),
+				chain_monitor: nodes[0].chain_monitor.clone(),
+				tx_broadcaster: nodes[0].tx_broadcaster.clone(),
+				logger: Arc::new(test_utils::TestLogger::new()),
+				channel_monitors: &channel_monitors,
+			}).unwrap()
+		};
+		assert!(nodes_0_read.is_empty()); // deserialization consumed every byte
+
+		assert!(nodes[0].chan_monitor.add_update_monitor(chan_0_monitor.get_funding_txo().unwrap(), chan_0_monitor).is_ok());
+		nodes[0].node = Arc::new(nodes_0_deserialized); // swap in the deserialized manager
+		let nodes_0_as_listener: Arc<ChainListener> = nodes[0].node.clone();
+		nodes[0].chain_monitor.register_listener(Arc::downgrade(&nodes_0_as_listener)); // re-register for block events, as the old manager's registration is stale
+		assert_eq!(nodes[0].node.list_channels().len(), 1); // the unfunded channel survived the round-trip
+		check_added_monitors!(nodes[0], 1); // re-adding the monitor above counts as one update
+
+		nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+		let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
+		nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+		let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+
+		nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap();
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); // nothing to retransmit pre-funding
+		nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap();
+		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+
+		let (funding_locked, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx); // confirm funding only now, post-restart
+		let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked);
+		for node in nodes.iter() {
+			assert!(node.router.handle_channel_announcement(&announcement).unwrap());
+			node.router.handle_channel_update(&as_update).unwrap();
+			node.router.handle_channel_update(&bs_update).unwrap();
+		}
+
+		send_payment(&nodes[0], &[&nodes[1]], 1000000); // channel is fully usable after the round-trip
+	}
+
+	#[test]
+	fn test_simple_manager_serialize_deserialize() {
+		let mut nodes = create_network(2);
+		create_announced_chan_between_nodes(&nodes, 0, 1);
+
+		// One payment left claimable and one left failable across the restart
+		let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+		let (_, our_payment_hash) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+		let nodes_0_serialized = nodes[0].node.encode(); // snapshot nodes[0]'s ChannelManager state
+		let mut chan_0_monitor_serialized = VecWriter(Vec::new());
+		nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap(); // snapshot the (only) ChannelMonitor
+
+		nodes[0].chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new()))); // fresh monitor set, as a restarted node would have
+		let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
+		let (_, chan_0_monitor) = <(Sha256dHash, ChannelMonitor)>::read(&mut chan_0_monitor_read, Arc::new(test_utils::TestLogger::new())).unwrap();
+		assert!(chan_0_monitor_read.is_empty()); // deserialization consumed every byte
+
+		let mut nodes_0_read = &nodes_0_serialized[..];
+		let keys_manager = Arc::new(keysinterface::KeysManager::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new()))); // same seed => same node keys after "restart"
+		let (_, nodes_0_deserialized) = {
+			let mut channel_monitors = HashMap::new();
+			channel_monitors.insert(chan_0_monitor.get_funding_txo().unwrap(), &chan_0_monitor);
+			<(Sha256dHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+				keys_manager,
+				fee_estimator: Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 }),
+				monitor: nodes[0].chan_monitor.clone(),
+				chain_monitor: nodes[0].chain_monitor.clone(),
+				tx_broadcaster: nodes[0].tx_broadcaster.clone(),
+				logger: Arc::new(test_utils::TestLogger::new()),
+				channel_monitors: &channel_monitors,
+			}).unwrap()
+		};
+		assert!(nodes_0_read.is_empty()); // deserialization consumed every byte
+
+		assert!(nodes[0].chan_monitor.add_update_monitor(chan_0_monitor.get_funding_txo().unwrap(), chan_0_monitor).is_ok());
+		nodes[0].node = Arc::new(nodes_0_deserialized); // swap in the deserialized manager
+		check_added_monitors!(nodes[0], 1); // re-adding the monitor above counts as one update
+
+		reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); // no state was lost
+
+		// The pre-restart pending HTLCs can still be failed and claimed normally
+		fail_payment(&nodes[0], &[&nodes[1]], our_payment_hash);
+		claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
+	}
+
+	#[test]
+	fn test_manager_serialize_deserialize_inconsistent_monitor() {
+		// Test deserializing a ChannelManager with an out-of-date ChannelMonitor
+		let mut nodes = create_network(4);
+		create_announced_chan_between_nodes(&nodes, 0, 1);
+		create_announced_chan_between_nodes(&nodes, 2, 0);
+		let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 3);
+
+		let (our_payment_preimage, _) = route_payment(&nodes[2], &[&nodes[0], &nodes[1]], 1000000);
+
+		// Serialize the ChannelManager here, before the extra payment below; the monitors we keep up-to-date
+		let nodes_0_serialized = nodes[0].node.encode();
+
+		route_payment(&nodes[0], &[&nodes[3]], 1000000); // advances channel w/ nodes[3] past the serialized manager state
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+		nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+		nodes[3].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+		// Now serialize the ChannelMonitors (the one for the channel w/ nodes[3] is now
+		// out-of-sync with the ChannelManager serialized above)
+		let mut node_0_monitors_serialized = Vec::new();
+		for monitor in nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter() {
+			let mut writer = VecWriter(Vec::new());
+			monitor.1.write_for_disk(&mut writer).unwrap();
+			node_0_monitors_serialized.push(writer.0);
+		}
+
+		nodes[0].chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new()))); // fresh monitor set, as a restarted node would have
+		let mut node_0_monitors = Vec::new();
+		for serialized in node_0_monitors_serialized.iter() {
+			let mut read = &serialized[..];
+			let (_, monitor) = <(Sha256dHash, ChannelMonitor)>::read(&mut read, Arc::new(test_utils::TestLogger::new())).unwrap();
+			assert!(read.is_empty()); // deserialization consumed every byte
+			node_0_monitors.push(monitor);
+		}
+
+		let mut nodes_0_read = &nodes_0_serialized[..];
+		let keys_manager = Arc::new(keysinterface::KeysManager::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new()))); // same seed => same node keys after "restart"
+		let (_, nodes_0_deserialized) = <(Sha256dHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+			keys_manager,
+			fee_estimator: Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 }),
+			monitor: nodes[0].chan_monitor.clone(),
+			chain_monitor: nodes[0].chain_monitor.clone(),
+			tx_broadcaster: nodes[0].tx_broadcaster.clone(),
+			logger: Arc::new(test_utils::TestLogger::new()),
+			channel_monitors: &node_0_monitors.iter().map(|monitor| { (monitor.get_funding_txo().unwrap(), monitor) }).collect(),
+		}).unwrap();
+		assert!(nodes_0_read.is_empty()); // deserialization consumed every byte
+
+		{ // The out-of-sync channel's forced close should result in a commitment tx and an HTLC tx
+			let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+			assert_eq!(txn.len(), 2);
+			assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid()); // commitment tx spends the funding output
+			assert_eq!(txn[1].input[0].previous_output.txid, txn[0].txid()); // HTLC tx spends the commitment tx
+		}
+
+		for monitor in node_0_monitors.drain(..) {
+			assert!(nodes[0].chan_monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor).is_ok());
+			check_added_monitors!(nodes[0], 1);
+		}
+		nodes[0].node = Arc::new(nodes_0_deserialized); // swap in the deserialized manager
+
+		// nodes[1] and nodes[2] have no lost state with nodes[0]...
+		reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+		reconnect_nodes(&nodes[0], &nodes[2], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+		//... and we can even still claim the payment!
+		claim_payment(&nodes[2], &[&nodes[0], &nodes[1]], our_payment_preimage);
+
+		nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id());
+		let reestablish = get_event_msg!(nodes[3], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+		nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id());
+		if let Err(msgs::HandleError { action: Some(msgs::ErrorAction::SendErrorMessage { msg }), .. }) = nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish) {
+			assert_eq!(msg.channel_id, channel_id); // the stale channel gets an error message, not a silent reestablish
+		} else { panic!("Unexpected result"); }
+	}
}