X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=b73f8667073bac64abdb11fd9e2f7e81fbb07c7e;hb=133a8a31311b817f8e1e7512b3250bf71c5747f2;hp=60fe1c4586eb89471113ffe0c8ed6ae97fb90897;hpb=e58440fe454653cedab465c495e39881fbc74aab;p=rust-lightning diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 60fe1c45..b73f8667 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -30,26 +30,26 @@ use chain::transaction::OutPoint; use ln::channel::{Channel, ChannelError}; use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY}; use ln::router::Route; -use ln::features::InitFeatures; +use ln::features::{InitFeatures, NodeFeatures}; use ln::msgs; use ln::onion_utils; use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError}; -use chain::keysinterface::{ChannelKeys, KeysInterface}; +use chain::keysinterface::{ChannelKeys, KeysInterface, InMemoryChannelKeys}; use util::config::UserConfig; use util::{byte_utils, events}; use util::ser::{Readable, ReadableArgs, Writeable, Writer}; -use util::chacha20::ChaCha20; +use util::chacha20::{ChaCha20, ChaChaReader}; use util::logger::Logger; use util::errors::APIError; use std::{cmp, mem}; use std::collections::{HashMap, hash_map, HashSet}; -use std::io::Cursor; +use std::io::{Cursor, Read}; use std::sync::{Arc, Mutex, MutexGuard, RwLock}; use std::sync::atomic::{AtomicUsize, Ordering}; use std::time::Duration; - -const SIXTY_FIVE_ZEROS: [u8; 65] = [0; 65]; +use std::marker::{Sync, Send}; +use std::ops::Deref; // We hold various information about HTLC relay in the HTLC objects in Channel itself: // @@ -57,15 +57,19 @@ const SIXTY_FIVE_ZEROS: [u8; 65] = [0; 65]; // forward the HTLC with information it will give back to us when it does so, or if it should Fail // the HTLC with the relevant message for the Channel to handle giving to the remote peer. // -// When a Channel forwards an HTLC to its peer, it will give us back the PendingForwardHTLCInfo -// which we will use to construct an outbound HTLC, with a relevant HTLCSource::PreviousHopData -// filled in to indicate where it came from (which we can use to either fail-backwards or fulfill -// the HTLC backwards along the relevant path). +// Once said HTLC is committed in the Channel, if the PendingHTLCStatus indicated Forward, the +// Channel will return the PendingHTLCInfo back to us, and we will create an HTLCForwardInfo +// with it to track where it came from (in case of onwards-forward error), waiting a random delay +// before we forward it. +// +// We will then use HTLCForwardInfo's PendingHTLCInfo to construct an outbound HTLC, with a +// relevant HTLCSource::PreviousHopData filled in to indicate where it came from (which we can use +// to either fail-backwards or fulfill the HTLC backwards along the relevant path). // Alternatively, we can fill an outbound HTLC with a HTLCSource::OutboundRoute indicating this is // our payment, which we can use to decode errors or inform the user that the payment was sent. 
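// Illustrative sketch (editor's addition, not part of this patch): how the pipeline described
// above fits together once the types introduced below exist. The helper name and its arguments
// are hypothetical; the PendingHTLCInfo fields and the HTLCForwardInfo::AddHTLC variant are the
// ones added in this diff, and HashMap is already imported in this module.
fn queue_htlc_forward_sketch(forward_htlcs: &mut HashMap<u64, Vec<HTLCForwardInfo>>,
		prev_short_channel_id: u64, prev_htlc_id: u64, forward_info: PendingHTLCInfo) {
	// forward_htlcs is keyed by the *outgoing* short_channel_id; a key of 0 means the HTLC
	// terminates with us (a payment we received) rather than being forwarded onwards.
	let outgoing_scid = forward_info.short_channel_id;
	forward_htlcs.entry(outgoing_scid).or_insert_with(Vec::new).push(HTLCForwardInfo::AddHTLC {
		prev_short_channel_id, // the channel the HTLC arrived on, kept for fail-backwards
		prev_htlc_id,          // the HTLC's id within that inbound channel
		forward_info,          // amount, CLTV and onion data for the next hop
	});
}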
-/// Stores the info we will need to send when we want to forward an HTLC onwards + #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug -pub(super) struct PendingForwardHTLCInfo { +pub(super) struct PendingHTLCInfo { onion_packet: Option, incoming_shared_secret: [u8; 32], payment_hash: PaymentHash, @@ -83,10 +87,22 @@ pub(super) enum HTLCFailureMsg { /// Stores whether we can't forward an HTLC or relevant forwarding info #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug pub(super) enum PendingHTLCStatus { - Forward(PendingForwardHTLCInfo), + Forward(PendingHTLCInfo), Fail(HTLCFailureMsg), } +pub(super) enum HTLCForwardInfo { + AddHTLC { + prev_short_channel_id: u64, + prev_htlc_id: u64, + forward_info: PendingHTLCInfo, + }, + FailHTLC { + htlc_id: u64, + err_packet: msgs::OnionErrorPacket, + }, +} + /// Tracks the inbound corresponding to an outbound HTLC #[derive(Clone, PartialEq)] pub(super) struct HTLCPreviousHopData { @@ -231,18 +247,6 @@ impl MsgHandleErrInternal { /// second to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly. const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100; -pub(super) enum HTLCForwardInfo { - AddHTLC { - prev_short_channel_id: u64, - prev_htlc_id: u64, - forward_info: PendingForwardHTLCInfo, - }, - FailHTLC { - htlc_id: u64, - err_packet: msgs::OnionErrorPacket, - }, -} - /// For events which result in both a RevokeAndACK and a CommitmentUpdate, by default they should /// be sent in the order they appear in the return value, however sometimes the order needs to be /// variable at runtime (eg Channel::channel_reestablish needs to re-send messages in the order @@ -262,7 +266,7 @@ pub(super) struct ChannelHolder { /// short channel id -> forward infos. Key of 0 means payments received /// Note that while this is held in the same mutex as the channels themselves, no consistency /// guarantees are made about the existence of a channel with the short id here, nor the short - /// ids in the PendingForwardHTLCInfo! + /// ids in the PendingHTLCInfo! pub(super) forward_htlcs: HashMap>, /// payment_hash -> Vec<(amount_received, htlc_source)> for tracking things that were to us and /// can be failed/claimed by the user @@ -275,9 +279,30 @@ pub(super) struct ChannelHolder { pub(super) pending_msg_events: Vec, } +/// State we hold per-peer. In the future we should put channels in here, but for now we only hold +/// the latest Init features we heard from the peer. +struct PeerState { + latest_features: InitFeatures, +} + #[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))] const ERR: () = "You need at least 32 bit pointers (well, usize, but we'll assume they're the same) for ChannelManager::latest_block_height"; +/// SimpleArcChannelManager is useful when you need a ChannelManager with a static lifetime, e.g. +/// when you're using lightning-net-tokio (since tokio::spawn requires parameters with static +/// lifetimes). Other times you can afford a reference, which is more efficient, in which case +/// SimpleRefChannelManager is the more appropriate type. Defining these type aliases prevents +/// issues such as overly long function definitions. +pub type SimpleArcChannelManager = Arc>>; + +/// SimpleRefChannelManager is a type alias for a ChannelManager reference, and is the reference +/// counterpart to the SimpleArcChannelManager type alias. Use this type by default when you don't +/// need a ChannelManager with a static lifetime. 
You'll need a static lifetime in cases such as +/// usage of lightning-net-tokio (since tokio::spawn requires parameters with static lifetimes). +/// But if this is not necessary, using a reference is more efficient. Defining these type aliases +/// helps with issues such as long function definitions. +pub type SimpleRefChannelManager<'a, M> = ChannelManager; + /// Manager which keeps track of a number of channels and sends messages to the appropriate /// channel, also tracking HTLC preimages and forwarding onion packets appropriately. /// @@ -307,12 +332,18 @@ const ERR: () = "You need at least 32 bit pointers (well, usize, but we'll assum /// ChannelUpdate messages informing peers that the channel is temporarily disabled. To avoid /// spam due to quick disconnection/reconnection, updates are not sent until the channel has been /// offline for a full minute. In order to track this, you must call -/// timer_chan_freshness_every_min roughly once per minute, though it doesn't have to be perfec. -pub struct ChannelManager { +/// timer_chan_freshness_every_min roughly once per minute, though it doesn't have to be perfect. +/// +/// Rather than using a plain ChannelManager, it is preferable to use either a SimpleArcChannelManager +/// a SimpleRefChannelManager, for conciseness. See their documentation for more details, but +/// essentially you should default to using a SimpleRefChannelManager, and use a +/// SimpleArcChannelManager when you require a ChannelManager with a static lifetime, such as when +/// you're using lightning-net-tokio. +pub struct ChannelManager where M::Target: ManyChannelMonitor { default_configuration: UserConfig, genesis_hash: Sha256dHash, fee_estimator: Arc, - monitor: Arc, + monitor: M, tx_broadcaster: Arc, #[cfg(test)] @@ -328,6 +359,16 @@ pub struct ChannelManager { channel_state: Mutex>, our_network_key: SecretKey, + last_node_announcement_serial: AtomicUsize, + + /// The bulk of our storage will eventually be here (channels and message queues and the like). + /// If we are connected to a peer we always at least have an entry here, even if no channels + /// are currently open with that peer. + /// Because adding or removing an entry is rare, we usually take an outer read lock and then + /// operate on the inner value freely. Sadly, this prevents parallel operation when opening a + /// new channel. + per_peer_state: RwLock>>, + pending_events: Mutex>, /// Used when we have to take a BIG lock to make sure everything is self-consistent. /// Essentially just when we're serializing ourselves out. @@ -390,6 +431,10 @@ pub struct ChannelDetails { pub short_channel_id: Option, /// The node_id of our counterparty pub remote_network_id: PublicKey, + /// The Features the channel counterparty provided upon last connection. + /// Useful for routing as it is the most up-to-date copy of the counterparty's features and + /// many routing-relevant features are present in the init context. + pub counterparty_features: InitFeatures, /// The value, in satoshis, of this channel as appears in the funding output pub channel_value_satoshis: u64, /// The user_id passed in to create_channel, or 0 if the channel was inbound. @@ -532,7 +577,7 @@ macro_rules! 
handle_monitor_err { } else if $resend_commitment { "commitment" } else if $resend_raa { "RAA" } else { "nothing" }, - (&$failed_forwards as &Vec<(PendingForwardHTLCInfo, u64)>).len(), + (&$failed_forwards as &Vec<(PendingHTLCInfo, u64)>).len(), (&$failed_fails as &Vec<(HTLCSource, PaymentHash, HTLCFailReason)>).len()); if !$resend_commitment { debug_assert!($action_type == RAACommitmentOrder::RevokeAndACKFirst || !$resend_raa); @@ -568,7 +613,7 @@ macro_rules! maybe_break_monitor_err { } } -impl ChannelManager { +impl ChannelManager where M::Target: ManyChannelMonitor { /// Constructs a new ChannelManager to hold several channels and route between them. /// /// This is the main "logic hub" for all channel-related actions, and implements @@ -587,14 +632,14 @@ impl ChannelManager { /// the ChannelManager as a listener to the BlockNotifier and call the BlockNotifier's /// `block_(dis)connected` methods, which will notify all registered listeners in one /// go. - pub fn new(network: Network, feeest: Arc, monitor: Arc, tx_broadcaster: Arc, logger: Arc,keys_manager: Arc>, config: UserConfig, current_blockchain_height: usize) -> Result>, secp256k1::Error> { + pub fn new(network: Network, feeest: Arc, monitor: M, tx_broadcaster: Arc, logger: Arc,keys_manager: Arc>, config: UserConfig, current_blockchain_height: usize) -> Result, secp256k1::Error> { let secp_ctx = Secp256k1::new(); - let res = Arc::new(ChannelManager { + let res = ChannelManager { default_configuration: config.clone(), genesis_hash: genesis_block(network).header.bitcoin_hash(), fee_estimator: feeest.clone(), - monitor: monitor.clone(), + monitor, tx_broadcaster, latest_block_height: AtomicUsize::new(current_blockchain_height), @@ -610,13 +655,17 @@ impl ChannelManager { }), our_network_key: keys_manager.get_node_secret(), + last_node_announcement_serial: AtomicUsize::new(0), + + per_peer_state: RwLock::new(HashMap::new()), + pending_events: Mutex::new(Vec::new()), total_consistency_lock: RwLock::new(()), keys_manager, logger, - }); + }; Ok(res) } @@ -660,56 +709,53 @@ impl ChannelManager { Ok(()) } - /// Gets the list of open channels, in random order. See ChannelDetail field documentation for - /// more information. - pub fn list_channels(&self) -> Vec { - let channel_state = self.channel_state.lock().unwrap(); - let mut res = Vec::with_capacity(channel_state.by_id.len()); - for (channel_id, channel) in channel_state.by_id.iter() { - let (inbound_capacity_msat, outbound_capacity_msat) = channel.get_inbound_outbound_available_balance_msat(); - res.push(ChannelDetails { - channel_id: (*channel_id).clone(), - short_channel_id: channel.get_short_channel_id(), - remote_network_id: channel.get_their_node_id(), - channel_value_satoshis: channel.get_value_satoshis(), - inbound_capacity_msat, - outbound_capacity_msat, - user_id: channel.get_user_id(), - is_live: channel.is_live(), - }); - } - res - } - - /// Gets the list of usable channels, in random order. Useful as an argument to - /// Router::get_route to ensure non-announced channels are used. - /// - /// These are guaranteed to have their is_live value set to true, see the documentation for - /// ChannelDetails::is_live for more info on exactly what the criteria are. 
-	pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
-		let channel_state = self.channel_state.lock().unwrap();
-		let mut res = Vec::with_capacity(channel_state.by_id.len());
-		for (channel_id, channel) in channel_state.by_id.iter() {
-			// Note we use is_live here instead of usable which leads to somewhat confused
-			// internal/external nomenclature, but that's ok cause that's probably what the user
-			// really wanted anyway.
-			if channel.is_live() {
+	fn list_channels_with_filter<F: Fn(&(&[u8; 32], &Channel<ChanSigner>)) -> bool>(&self, f: F) -> Vec<ChannelDetails> {
+		let mut res = Vec::new();
+		{
+			let channel_state = self.channel_state.lock().unwrap();
+			res.reserve(channel_state.by_id.len());
+			for (channel_id, channel) in channel_state.by_id.iter().filter(f) {
 				let (inbound_capacity_msat, outbound_capacity_msat) = channel.get_inbound_outbound_available_balance_msat();
 				res.push(ChannelDetails {
 					channel_id: (*channel_id).clone(),
 					short_channel_id: channel.get_short_channel_id(),
 					remote_network_id: channel.get_their_node_id(),
+					counterparty_features: InitFeatures::empty(),
 					channel_value_satoshis: channel.get_value_satoshis(),
 					inbound_capacity_msat,
 					outbound_capacity_msat,
 					user_id: channel.get_user_id(),
-					is_live: true,
+					is_live: channel.is_live(),
 				});
 			}
 		}
+		let per_peer_state = self.per_peer_state.read().unwrap();
+		for chan in res.iter_mut() {
+			if let Some(peer_state) = per_peer_state.get(&chan.remote_network_id) {
+				chan.counterparty_features = peer_state.lock().unwrap().latest_features.clone();
+			}
+		}
 		res
 	}
 
+	/// Gets the list of open channels, in random order. See ChannelDetail field documentation for
+	/// more information.
+	pub fn list_channels(&self) -> Vec<ChannelDetails> {
+		self.list_channels_with_filter(|_| true)
+	}
+
+	/// Gets the list of usable channels, in random order. Useful as an argument to
+	/// Router::get_route to ensure non-announced channels are used.
+	///
+	/// These are guaranteed to have their is_live value set to true, see the documentation for
+	/// ChannelDetails::is_live for more info on exactly what the criteria are.
+	pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
+		// Note we use is_live here instead of usable which leads to somewhat confused
+		// internal/external nomenclature, but that's ok cause that's probably what the user
+		// really wanted anyway.
+		self.list_channels_with_filter(|&(_, ref channel)| channel.is_live())
+	}
+
 	/// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
 	/// will be accepted on the given channel, and after additional timeout/the closing of all
 	/// pending HTLCs, the channel will be closed on chain.
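// Illustrative sketch (editor's addition, not part of this patch): consuming the ChannelDetails
// returned by the listing methods above. The helper name is hypothetical; the fields it reads,
// including the new counterparty_features, are the ones populated by list_channels_with_filter.
fn first_hop_candidates_sketch(channels: Vec<ChannelDetails>) -> Vec<([u8; 32], InitFeatures)> {
	channels.into_iter().filter_map(|chan| {
		// Entries from list_usable_channels always have is_live == true; counterparty_features
		// carries the peer's most recently received InitFeatures (InitFeatures::empty() if we
		// have none on record), which is what makes these entries useful as routing first-hops.
		if chan.is_live && chan.outbound_capacity_msat > 0 {
			Some((chan.channel_id, chan.counterparty_features))
		} else {
			None
		}
	}).collect()
}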
@@ -866,22 +912,30 @@ impl ChannelManager { } let mut chacha = ChaCha20::new(&rho, &[0u8; 8]); - let next_hop_data = { - let mut decoded = [0; 65]; - chacha.process(&msg.onion_routing_packet.hop_data[0..65], &mut decoded); - match msgs::OnionHopData::read(&mut Cursor::new(&decoded[..])) { + let mut chacha_stream = ChaChaReader { chacha: &mut chacha, read: Cursor::new(&msg.onion_routing_packet.hop_data[..]) }; + let (next_hop_data, next_hop_hmac) = { + match msgs::OnionHopData::read(&mut chacha_stream) { Err(err) => { let error_code = match err { msgs::DecodeError::UnknownVersion => 0x4000 | 1, // unknown realm byte + msgs::DecodeError::UnknownRequiredFeature| + msgs::DecodeError::InvalidValue| + msgs::DecodeError::ShortRead => 0x4000 | 22, // invalid_onion_payload _ => 0x2000 | 2, // Should never happen }; return_err!("Unable to decode our hop data", error_code, &[0;0]); }, - Ok(msg) => msg + Ok(msg) => { + let mut hmac = [0; 32]; + if let Err(_) = chacha_stream.read_exact(&mut hmac[..]) { + return_err!("Unable to decode hop data", 0x4000 | 22, &[0;0]); + } + (msg, hmac) + }, } }; - let pending_forward_info = if next_hop_data.hmac == [0; 32] { + let pending_forward_info = if next_hop_hmac == [0; 32] { #[cfg(test)] { // In tests, make sure that the initial onion pcket data is, at least, non-0. @@ -891,10 +945,11 @@ impl ChannelManager { // as-is (and were originally 0s). // Of course reverse path calculation is still pretty easy given naive routing // algorithms, but this fixes the most-obvious case. - let mut new_packet_data = [0; 19*65]; - chacha.process(&msg.onion_routing_packet.hop_data[65..], &mut new_packet_data[0..19*65]); - assert_ne!(new_packet_data[0..65], [0; 65][..]); - assert_ne!(new_packet_data[..], [0; 19*65][..]); + let mut next_bytes = [0; 32]; + chacha_stream.read_exact(&mut next_bytes).unwrap(); + assert_ne!(next_bytes[..], [0; 32][..]); + chacha_stream.read_exact(&mut next_bytes).unwrap(); + assert_ne!(next_bytes[..], [0; 32][..]); } // OUR PAYMENT! @@ -903,11 +958,11 @@ impl ChannelManager { return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]); } // final_incorrect_htlc_amount - if next_hop_data.data.amt_to_forward > msg.amount_msat { + if next_hop_data.amt_to_forward > msg.amount_msat { return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat)); } // final_incorrect_cltv_expiry - if next_hop_data.data.outgoing_cltv_value != msg.cltv_expiry { + if next_hop_data.outgoing_cltv_value != msg.cltv_expiry { return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry)); } @@ -916,18 +971,29 @@ impl ChannelManager { // instead we stay symmetric with the forwarding case, only responding (after a // delay) once they've send us a commitment_signed! 
- PendingHTLCStatus::Forward(PendingForwardHTLCInfo { + PendingHTLCStatus::Forward(PendingHTLCInfo { onion_packet: None, payment_hash: msg.payment_hash.clone(), short_channel_id: 0, incoming_shared_secret: shared_secret, - amt_to_forward: next_hop_data.data.amt_to_forward, - outgoing_cltv_value: next_hop_data.data.outgoing_cltv_value, + amt_to_forward: next_hop_data.amt_to_forward, + outgoing_cltv_value: next_hop_data.outgoing_cltv_value, }) } else { let mut new_packet_data = [0; 20*65]; - chacha.process(&msg.onion_routing_packet.hop_data[65..], &mut new_packet_data[0..19*65]); - chacha.process(&SIXTY_FIVE_ZEROS[..], &mut new_packet_data[19*65..]); + let read_pos = chacha_stream.read(&mut new_packet_data).unwrap(); + #[cfg(debug_assertions)] + { + // Check two things: + // a) that the behavior of our stream here will return Ok(0) even if the TLV + // read above emptied out our buffer and the unwrap() wont needlessly panic + // b) that we didn't somehow magically end up with extra data. + let mut t = [0; 1]; + debug_assert!(chacha_stream.read(&mut t).unwrap() == 0); + } + // Once we've emptied the set of bytes our peer gave us, encrypt 0 bytes until we + // fill the onion hop data we'll forward to our next-hop peer. + chacha_stream.chacha.process_in_place(&mut new_packet_data[read_pos..]); let mut new_pubkey = msg.onion_routing_packet.public_key.unwrap(); @@ -946,21 +1012,29 @@ impl ChannelManager { version: 0, public_key, hop_data: new_packet_data, - hmac: next_hop_data.hmac.clone(), + hmac: next_hop_hmac.clone(), }; - PendingHTLCStatus::Forward(PendingForwardHTLCInfo { + let short_channel_id = match next_hop_data.format { + msgs::OnionHopDataFormat::Legacy { short_channel_id } => short_channel_id, + msgs::OnionHopDataFormat::NonFinalNode { short_channel_id } => short_channel_id, + msgs::OnionHopDataFormat::FinalNode => { + return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0;0]); + }, + }; + + PendingHTLCStatus::Forward(PendingHTLCInfo { onion_packet: Some(outgoing_packet), payment_hash: msg.payment_hash.clone(), - short_channel_id: next_hop_data.data.short_channel_id, + short_channel_id: short_channel_id, incoming_shared_secret: shared_secret, - amt_to_forward: next_hop_data.data.amt_to_forward, - outgoing_cltv_value: next_hop_data.data.outgoing_cltv_value, + amt_to_forward: next_hop_data.amt_to_forward, + outgoing_cltv_value: next_hop_data.outgoing_cltv_value, }) }; channel_state = Some(self.channel_state.lock().unwrap()); - if let &PendingHTLCStatus::Forward(PendingForwardHTLCInfo { ref onion_packet, ref short_channel_id, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info { + if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref onion_packet, ref short_channel_id, ref amt_to_forward, ref outgoing_cltv_value, .. 
}) = &pending_forward_info { if onion_packet.is_some() { // If short_channel_id is 0 here, we'll reject them in the body here let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned(); let forwarding_id = match id_option { @@ -1097,6 +1171,9 @@ impl ChannelManager { let onion_keys = secp_call!(onion_utils::construct_onion_keys(&self.secp_ctx, &route, &session_priv), APIError::RouteError{err: "Pubkey along hop was maliciously selected"}); let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route, cur_height)?; + if onion_utils::route_size_insane(&onion_payloads) { + return Err(APIError::RouteError{err: "Route had too large size once"}); + } let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, &payment_hash); let _ = self.total_consistency_lock.read().unwrap(); @@ -1245,6 +1322,37 @@ impl ChannelManager { }) } + /// Generates a signed node_announcement from the given arguments and creates a + /// BroadcastNodeAnnouncement event. + /// + /// RGB is a node "color" and alias a printable human-readable string to describe this node to + /// humans. They carry no in-protocol meaning. + /// + /// addresses represent the set (possibly empty) of socket addresses on which this node accepts + /// incoming connections. + pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], addresses: msgs::NetAddressSet) { + let _ = self.total_consistency_lock.read().unwrap(); + + let announcement = msgs::UnsignedNodeAnnouncement { + features: NodeFeatures::supported(), + timestamp: self.last_node_announcement_serial.fetch_add(1, Ordering::AcqRel) as u32, + node_id: self.get_our_node_id(), + rgb, alias, + addresses: addresses.to_vec(), + excess_address_data: Vec::new(), + excess_data: Vec::new(), + }; + let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]); + + let mut channel_state = self.channel_state.lock().unwrap(); + channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastNodeAnnouncement { + msg: msgs::NodeAnnouncement { + signature: self.secp_ctx.sign(&msghash, &self.our_network_key), + contents: announcement + }, + }); + } + /// Processes HTLCs which are pending waiting on random forward delay. /// /// Should only really ever be called in response to a PendingHTLCsForwardable event. @@ -2079,7 +2187,7 @@ impl ChannelManager { // If the update_add is completely bogus, the call will Err and we will close, // but if we've sent a shutdown and they haven't acknowledged it yet, we just // want to reject the new HTLC and fail it backwards instead of forwarding. - if let PendingHTLCStatus::Forward(PendingForwardHTLCInfo { incoming_shared_secret, .. }) = pending_forward_info { + if let PendingHTLCStatus::Forward(PendingHTLCInfo { incoming_shared_secret, .. 
}) = pending_forward_info { let chan_update = self.get_channel_update(chan.get()); pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { channel_id: msg.channel_id, @@ -2209,7 +2317,7 @@ impl ChannelManager { } #[inline] - fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, Vec<(PendingForwardHTLCInfo, u64)>)]) { + fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, Vec<(PendingHTLCInfo, u64)>)]) { for &mut (prev_short_channel_id, ref mut pending_forwards) in per_source_pending_forwards { let mut forward_event = None; if !pending_forwards.is_empty() { @@ -2467,7 +2575,7 @@ impl ChannelManager { } } -impl events::MessageSendEventsProvider for ChannelManager { +impl events::MessageSendEventsProvider for ChannelManager where M::Target: ManyChannelMonitor { fn get_and_clear_pending_msg_events(&self) -> Vec { // TODO: Event release to users and serialization is currently race-y: it's very easy for a // user to serialize a ChannelManager with pending events in it and lose those events on @@ -2492,7 +2600,7 @@ impl events::MessageSendEventsProvider for ChannelManag } } -impl events::EventsProvider for ChannelManager { +impl events::EventsProvider for ChannelManager where M::Target: ManyChannelMonitor { fn get_and_clear_pending_events(&self) -> Vec { // TODO: Event release to users and serialization is currently race-y: it's very easy for a // user to serialize a ChannelManager with pending events in it and lose those events on @@ -2517,7 +2625,7 @@ impl events::EventsProvider for ChannelManager ChainListener for ChannelManager { +impl ChainListener for ChannelManager where M::Target: ManyChannelMonitor { fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], indexes_of_txn_matched: &[u32]) { let header_hash = header.bitcoin_hash(); log_trace!(self, "Block {} at height {} connected with {} txn matched", header_hash, height, txn_matched.len()); @@ -2631,7 +2739,7 @@ impl ChainListener for ChannelManager { } } -impl ChannelMessageHandler for ChannelManager { +impl ChannelMessageHandler for ChannelManager where M::Target: ManyChannelMonitor { fn handle_open_channel(&self, their_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel) { let _ = self.total_consistency_lock.read().unwrap(); let res = self.internal_open_channel(their_node_id, their_features, msg); @@ -2780,6 +2888,7 @@ impl ChannelMessageHandler for ChannelManager ChannelMessageHandler for ChannelManager ChannelMessageHandler for ChannelManager node_id != their_node_id, &events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => node_id != their_node_id, &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true, + &events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true, &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true, &events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != their_node_id, &events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true, } }); } + if no_channels_remain { + self.per_peer_state.write().unwrap().remove(their_node_id); + } + for failure in failed_channels.drain(..) 
{ self.finish_force_close_channel(failure); } @@ -2853,10 +2969,25 @@ impl ChannelMessageHandler for ChannelManager { + e.insert(Mutex::new(PeerState { + latest_features: init_msg.features.clone(), + })); + }, + hash_map::Entry::Occupied(e) => { + e.get().lock().unwrap().latest_features = init_msg.features.clone(); + }, + } + } + let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; let pending_msg_events = &mut channel_state.pending_msg_events; @@ -2898,7 +3029,7 @@ impl ChannelMessageHandler for ChannelManager(&self, writer: &mut W) -> Result<(), ::std::io::Error> { self.onion_packet.write(writer)?; self.incoming_shared_secret.write(writer)?; @@ -2910,9 +3041,9 @@ impl Writeable for PendingForwardHTLCInfo { } } -impl Readable for PendingForwardHTLCInfo { - fn read(reader: &mut R) -> Result { - Ok(PendingForwardHTLCInfo { +impl Readable for PendingHTLCInfo { + fn read(reader: &mut R) -> Result { + Ok(PendingHTLCInfo { onion_packet: Readable::read(reader)?, incoming_shared_secret: Readable::read(reader)?, payment_hash: Readable::read(reader)?, @@ -3079,7 +3210,7 @@ impl Readable for HTLCForwardInfo { } } -impl Writeable for ChannelManager { +impl Writeable for ChannelManager where M::Target: ManyChannelMonitor { fn write(&self, writer: &mut W) -> Result<(), ::std::io::Error> { let _ = self.total_consistency_lock.write().unwrap(); @@ -3123,6 +3254,16 @@ impl Writeable for ChannelManager Writeable for ChannelManager { +pub struct ChannelManagerReadArgs<'a, ChanSigner: ChannelKeys, M: Deref> where M::Target: ManyChannelMonitor { /// The keys provider which will give us relevant keys. Some keys will be loaded during /// deserialization. pub keys_manager: Arc>, @@ -3156,7 +3297,7 @@ pub struct ChannelManagerReadArgs<'a, ChanSigner: ChannelKeys> { /// No calls to the ManyChannelMonitor will be made during deserialization. It is assumed that /// you have deserialized ChannelMonitors separately and will add them to your /// ManyChannelMonitor after deserializing this ChannelManager. 
- pub monitor: Arc, + pub monitor: M, /// The BroadcasterInterface which will be used in the ChannelManager in the future and may be /// used to broadcast the latest local commitment transactions of channels which must be @@ -3182,8 +3323,8 @@ pub struct ChannelManagerReadArgs<'a, ChanSigner: ChannelKeys> { pub channel_monitors: &'a mut HashMap, } -impl<'a, R : ::std::io::Read, ChanSigner: ChannelKeys + Readable> ReadableArgs> for (Sha256dHash, ChannelManager) { - fn read(reader: &mut R, args: ChannelManagerReadArgs<'a, ChanSigner>) -> Result { +impl<'a, R : ::std::io::Read, ChanSigner: ChannelKeys + Readable, M: Deref> ReadableArgs> for (Sha256dHash, ChannelManager) where M::Target: ManyChannelMonitor { + fn read(reader: &mut R, args: ChannelManagerReadArgs<'a, ChanSigner, M>) -> Result { let _ver: u8 = Readable::read(reader)?; let min_ver: u8 = Readable::read(reader)?; if min_ver > SERIALIZATION_VERSION { @@ -3256,6 +3397,18 @@ impl<'a, R : ::std::io::Read, ChanSigner: ChannelKeys + Readable> ReadableArg claimable_htlcs.insert(payment_hash, previous_hops); } + let peer_count: u64 = Readable::read(reader)?; + let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, 128)); + for _ in 0..peer_count { + let peer_pubkey = Readable::read(reader)?; + let peer_state = PeerState { + latest_features: Readable::read(reader)?, + }; + per_peer_state.insert(peer_pubkey, Mutex::new(peer_state)); + } + + let last_node_announcement_serial: u32 = Readable::read(reader)?; + let channel_manager = ChannelManager { genesis_hash, fee_estimator: args.fee_estimator, @@ -3275,6 +3428,10 @@ impl<'a, R : ::std::io::Read, ChanSigner: ChannelKeys + Readable> ReadableArg }), our_network_key: args.keys_manager.get_node_secret(), + last_node_announcement_serial: AtomicUsize::new(last_node_announcement_serial as usize), + + per_peer_state: RwLock::new(per_peer_state), + pending_events: Mutex::new(Vec::new()), total_consistency_lock: RwLock::new(()), keys_manager: args.keys_manager,