X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=83746d01a94c86d1d062c61f9f36b6e5224f5f9f;hb=6df9129ace609bfb5c7f08ae0f41175126d05b1b;hp=0985044c77e85281e7d1d47db3d76670c3897109;hpb=523cab8ef74bb8f51d5ad719decc02015a41fe6d;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 0985044c..83746d01 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -1,3 +1,12 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
 //! The top-level channel management and payment tracking stuff lives here.
 //!
 //! The ChannelManager is the main chunk of logic implementing the lightning protocol and is
@@ -29,14 +38,16 @@ use bitcoin::secp256k1;
 use chain::chaininterface::{BroadcasterInterface,ChainListener,FeeEstimator};
 use chain::transaction::OutPoint;
 use ln::channel::{Channel, ChannelError};
-use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, ManyChannelMonitor, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
+use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, ManyChannelMonitor, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent};
 use ln::features::{InitFeatures, NodeFeatures};
 use routing::router::{Route, RouteHop};
 use ln::msgs;
+use ln::msgs::NetAddress;
 use ln::onion_utils;
 use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, OptionalField};
 use chain::keysinterface::{ChannelKeys, KeysInterface, KeysManager, InMemoryChannelKeys};
 use util::config::UserConfig;
+use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
 use util::{byte_utils, events};
 use util::ser::{Readable, ReadableArgs, MaybeReadable, Writeable, Writer};
 use util::chacha20::{ChaCha20, ChaChaReader};
@@ -303,7 +314,7 @@ pub(super) struct ChannelHolder<ChanSigner: ChannelKeys> {
 	claimable_htlcs: HashMap<(PaymentHash, Option<PaymentSecret>), Vec<ClaimableHTLC>>,
 	/// Messages to send to peers - pushed to in the same lock that they are generated in (except
 	/// for broadcast messages, where ordering isn't as strict).
-	pub(super) pending_msg_events: Vec<events::MessageSendEvent>,
+	pub(super) pending_msg_events: Vec<MessageSendEvent>,
 }
 
 /// State we hold per-peer. In the future we should put channels in here, but for now we only hold
@@ -1474,7 +1485,7 @@ impl
 	// be absurd. We ensure this by checking that at least 500 (our stated public contract on when
 	// broadcast_node_announcement panics) of the maximum-length addresses would fit in a 64KB
 	// message...
-	const HALF_MESSAGE_IS_ADDRS: u32 = ::std::u16::MAX as u32 / (msgs::NetAddress::MAX_LEN as u32 + 1) / 2;
+	const HALF_MESSAGE_IS_ADDRS: u32 = ::std::u16::MAX as u32 / (NetAddress::MAX_LEN as u32 + 1) / 2;
 	#[deny(const_err)]
 	#[allow(dead_code)]
 	// ...by failing to compile if the number of addresses that would be half of a message is
@@ -1494,7 +1505,7 @@
 	/// only Tor Onion addresses.
 	///
 	/// Panics if addresses is absurdly large (more than 500).
-	pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], addresses: Vec<msgs::NetAddress>) {
+	pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], addresses: Vec<NetAddress>) {
 		let _ = self.total_consistency_lock.read().unwrap();
 
 		if addresses.len() > 500 {
@@ -2957,31 +2968,61 @@ impl
 			Err(e) => { Err(APIError::APIMisuseError { err: e.err })}
 		}
 	}
+
+	/// Process pending events from the ManyChannelMonitor.
+	fn process_pending_monitor_events(&self) {
+		let mut failed_channels = Vec::new();
+		{
+			for monitor_event in self.monitor.get_and_clear_pending_monitor_events() {
+				match monitor_event {
+					MonitorEvent::HTLCEvent(htlc_update) => {
+						if let Some(preimage) = htlc_update.payment_preimage {
+							log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
+							self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
+						} else {
+							log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
+							self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+						}
+					},
+					MonitorEvent::CommitmentTxBroadcasted(funding_outpoint) => {
+						let mut channel_lock = self.channel_state.lock().unwrap();
+						let channel_state = &mut *channel_lock;
+						let by_id = &mut channel_state.by_id;
+						let short_to_id = &mut channel_state.short_to_id;
+						let pending_msg_events = &mut channel_state.pending_msg_events;
+						if let Some(mut chan) = by_id.remove(&funding_outpoint.to_channel_id()) {
+							if let Some(short_id) = chan.get_short_channel_id() {
+								short_to_id.remove(&short_id);
+							}
+							failed_channels.push(chan.force_shutdown(false));
+							if let Ok(update) = self.get_channel_update(&chan) {
+								pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+									msg: update
+								});
+							}
+						}
+					},
+				}
+			}
+		}
+
+		for failure in failed_channels.drain(..) {
+			self.finish_force_close_channel(failure);
+		}
+	}
 }
 
-impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> events::MessageSendEventsProvider for ChannelManager<ChanSigner, M, T, K, F, L>
+impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<ChanSigner, M, T, K, F, L>
 	where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
         T::Target: BroadcasterInterface,
         K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
         F::Target: FeeEstimator,
         L::Target: Logger,
 {
-	fn get_and_clear_pending_msg_events(&self) -> Vec<events::MessageSendEvent> {
-		// TODO: Event release to users and serialization is currently race-y: it's very easy for a
-		// user to serialize a ChannelManager with pending events in it and lose those events on
-		// restart. This is doubly true for the fail/fulfill-backs from monitor events!
-		{
-			//TODO: This behavior should be documented.
-			for htlc_update in self.monitor.get_and_clear_pending_htlcs_updated() {
-				if let Some(preimage) = htlc_update.payment_preimage {
-					log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
-					self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
-				} else {
-					log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
-					self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
-				}
-			}
-		}
+	fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
+		//TODO: This behavior should be documented. It's non-intuitive that we query
+		// ChannelMonitors when clearing other events.
+		self.process_pending_monitor_events();
 
 		let mut ret = Vec::new();
 		let mut channel_state = self.channel_state.lock().unwrap();
@@ -2990,29 +3031,17 @@ impl
 	}
 }
 
-impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> events::EventsProvider for ChannelManager<ChanSigner, M, T, K, F, L>
+impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> EventsProvider for ChannelManager<ChanSigner, M, T, K, F, L>
 	where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
         T::Target: BroadcasterInterface,
         K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
         F::Target: FeeEstimator,
         L::Target: Logger,
 {
-	fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
-		// TODO: Event release to users and serialization is currently race-y: it's very easy for a
-		// user to serialize a ChannelManager with pending events in it and lose those events on
-		// restart. This is doubly true for the fail/fulfill-backs from monitor events!
-		{
-			//TODO: This behavior should be documented.
-			for htlc_update in self.monitor.get_and_clear_pending_htlcs_updated() {
-				if let Some(preimage) = htlc_update.payment_preimage {
-					log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
-					self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
-				} else {
-					log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
-					self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
-				}
-			}
-		}
+	fn get_and_clear_pending_events(&self) -> Vec<Event> {
+		//TODO: This behavior should be documented. It's non-intuitive that we query
+		// ChannelMonitors when clearing other events.
+		self.process_pending_monitor_events();
 
 		let mut ret = Vec::new();
 		let mut pending_events = self.pending_events.lock().unwrap();
@@ -3095,21 +3124,6 @@ impl
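
The heart of this change: both event-provider implementations previously inlined the same loop over get_and_clear_pending_htlcs_updated(), and both now delegate to a single process_pending_monitor_events() helper, which also handles the new MonitorEvent::CommitmentTxBroadcasted case. Below is a minimal standalone Rust sketch of that shape only; every name in it (Monitor, Manager, the string payloads) is a hypothetical stand-in, not the rust-lightning API, whose real MonitorEvent variants carry HTLCUpdate and OutPoint payloads.

use std::sync::Mutex;

// Hypothetical stand-ins for the real types; rust-lightning's MonitorEvent
// carries HTLCUpdate/OutPoint payloads, not strings.
enum MonitorEvent {
    HTLCEvent(String),
    CommitmentTxBroadcasted(String),
}

struct Monitor {
    pending: Mutex<Vec<MonitorEvent>>,
}

impl Monitor {
    // Plays the role of ManyChannelMonitor::get_and_clear_pending_monitor_events():
    // hand back everything queued since the last poll, clearing the queue.
    fn get_and_clear_pending_monitor_events(&self) -> Vec<MonitorEvent> {
        self.pending.lock().unwrap().drain(..).collect()
    }
}

struct Manager {
    monitor: Monitor,
    pending_events: Mutex<Vec<String>>,
    pending_msg_events: Mutex<Vec<String>>,
}

impl Manager {
    // The commit's refactoring: one helper owns the monitor-event loop
    // instead of each provider method duplicating it.
    fn process_pending_monitor_events(&self) {
        for event in self.monitor.get_and_clear_pending_monitor_events() {
            match event {
                // The real code claims the HTLC or fails it backwards here.
                MonitorEvent::HTLCEvent(htlc) => {
                    self.pending_events.lock().unwrap().push(format!("resolved {}", htlc));
                }
                // The real code force-closes the channel and queues a
                // BroadcastChannelUpdate message here.
                MonitorEvent::CommitmentTxBroadcasted(chan) => {
                    self.pending_msg_events.lock().unwrap().push(format!("closed {}", chan));
                }
            }
        }
    }

    // Both provider entry points funnel through the helper first.
    fn get_and_clear_pending_events(&self) -> Vec<String> {
        self.process_pending_monitor_events();
        self.pending_events.lock().unwrap().drain(..).collect()
    }

    fn get_and_clear_pending_msg_events(&self) -> Vec<String> {
        self.process_pending_monitor_events();
        self.pending_msg_events.lock().unwrap().drain(..).collect()
    }
}

fn main() {
    let mgr = Manager {
        monitor: Monitor {
            pending: Mutex::new(vec![
                MonitorEvent::HTLCEvent("htlc-1".into()),
                MonitorEvent::CommitmentTxBroadcasted("chan-1".into()),
            ]),
        },
        pending_events: Mutex::new(Vec::new()),
        pending_msg_events: Mutex::new(Vec::new()),
    };
    // Whichever provider is polled first drains the monitor queue, so each
    // MonitorEvent is observed exactly once.
    println!("{:?}", mgr.get_and_clear_pending_events());     // ["resolved htlc-1"]
    println!("{:?}", mgr.get_and_clear_pending_msg_events()); // ["closed chan-1"]
}

Centralizing the drain this way means either provider call observes each MonitorEvent exactly once, and any future monitor event variant needs handling in only one place.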